diff --git a/DJAGEN/branches/mustafa_branch/00_default_vhost.conf b/DJAGEN/branches/mustafa_branch/00_default_vhost.conf
new file mode 100755
index 0000000..ce876c4
--- /dev/null
+++ b/DJAGEN/branches/mustafa_branch/00_default_vhost.conf
@@ -0,0 +1,126 @@
+### Section 3: Virtual Hosts
+#
+# VirtualHost: If you want to maintain multiple domains/hostnames on your
+# machine you can setup VirtualHost containers for them. Most configurations
+# use only name-based virtual hosts so the server doesn't need to worry about
+# IP addresses. This is indicated by the asterisks in the directives below.
+#
+# Please see the documentation at
+#
+# for further details before you try to setup virtual hosts.
+#
+# You may use the command line option '-S' to verify your virtual host
+# configuration.
+Listen 80
+#
+# Use name-based virtual hosting.
+#
+NameVirtualHost *:80
+
+#
+# VirtualHost example:
+# Almost any Apache directive may go into a VirtualHost container.
+# The first VirtualHost section is used for requests without a known
+# server name.
+#
+#<VirtualHost *:80>
+#    ServerAdmin webmaster@dummy-host.example.com
+#    DocumentRoot /www/docs/dummy-host.example.com
+#    ServerName dummy-host.example.com
+#    ErrorLog @rel_logfiledir@/dummy-host.example.com-error_log
+#    CustomLog @rel_logfiledir@/dummy-host.example.com-access_log common
+#</VirtualHost>
+
+#
+# The First Virtual Host is also your DEFAULT Virtual Host.
+# This means any requests that do not match any other vhosts will
+# go to this virtual host.
+#
+<VirtualHost *:80>
+
+    #
+    # DocumentRoot: The directory out of which you will serve your
+    # documents. By default, all requests are taken from this directory, but
+    # symbolic links and aliases may be used to point to other locations.
+    #
+    DocumentRoot "/var/www/localhost/htdocs"
+
+    #
+    # This should be changed to whatever you set DocumentRoot to.
+    #
+    <Directory "/var/www/localhost/htdocs">
+
+        #
+        # Possible values for the Options directive are "None", "All",
+        # or any combination of:
+        #   Indexes Includes FollowSymLinks SymLinksifOwnerMatch ExecCGI MultiViews
+        #
+        # Note that "MultiViews" must be named *explicitly* --- "Options All"
+        # doesn't give it to you.
+        #
+        # The Options directive is both complicated and important. Please see
+        # http://httpd.apache.org/docs-2.0/mod/core.html#options
+        # for more information.
+        #
+        Options Indexes FollowSymLinks
+
+        #
+        # AllowOverride controls what directives may be placed in .htaccess files.
+        # It can be "All", "None", or any combination of the keywords:
+        #   Options FileInfo AuthConfig Limit
+        #
+        AllowOverride None
+
+        #
+        # Controls who can get stuff from this server.
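+        # (Note: 'Order allow,deny' combined with 'Allow from all' below admits
+        # every client; tighten these access rules on production hosts.)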
+        #
+        Order allow,deny
+        Allow from all
+    </Directory>
+
+    <IfModule peruser.c>
+        # this must match a Processor
+        ServerEnvironment apache apache
+
+        # these are optional - defaults to the values specified in httpd.conf
+        MinSpareProcessors 4
+        MaxProcessors 20
+    </IfModule>
+
+    ServerName /
+    ServerAlias */
+
+    Alias /phpmyadmin/ /var/www/localhost/htdocs/phpmyadmin/
+    <Directory /var/www/localhost/htdocs/phpmyadmin/>
+        order deny,allow
+        Allow from all
+    </Directory>
+
+    Alias /djagenmedia/ /var/www/localhost/htdocs/djagen/
+    <Directory /var/www/localhost/htdocs/djagen/>
+        order deny,allow
+        Allow from all
+    </Directory>
+
+    Alias /admin_media/ /usr/lib/python2.5/site-packages/django/contrib/admin/media
+    <Directory /usr/lib/python2.5/site-packages/django/contrib/admin/media>
+        order deny,allow
+        Allow from all
+    </Directory>
+
+    WSGIScriptAlias / /home/cad/Workspace/djagen_ws/gezegen/branches/mustafa_branch/djagen/wsgi_handler.py
+    WSGIDaemonProcess djagen user=cad group=root processes=1 threads=10
+    WSGIProcessGroup djagen
+
+    <Directory /home/cad/Workspace/djagen_ws/gezegen/branches/mustafa_branch/djagen>
+        Order deny,allow
+        Allow from all
+    </Directory>
+
+</VirtualHost>
diff --git a/DJAGEN/branches/mustafa_branch/djagen/__init__.py b/DJAGEN/branches/mustafa_branch/djagen/__init__.py
new file mode 100755
index 0000000..e69de29
diff --git a/DJAGEN/branches/mustafa_branch/djagen/captcha/__init__.py b/DJAGEN/branches/mustafa_branch/djagen/captcha/__init__.py
new file mode 100755
index 0000000..ac47d9a
--- /dev/null
+++ b/DJAGEN/branches/mustafa_branch/djagen/captcha/__init__.py
@@ -0,0 +1,12 @@
+VERSION = (0, 1, 7)
+
+def get_version(svn=False):
+    "Returns the version as a human-format string."
+    v = '.'.join([str(i) for i in VERSION])
+    if svn:
+        from django.utils.version import get_svn_revision
+        import os
+        svn_rev = get_svn_revision(os.path.dirname(__file__))
+        if svn_rev:
+            v = '%s-%s' % (v, svn_rev)
+    return v
diff --git a/DJAGEN/branches/mustafa_branch/djagen/captcha/conf/__init__.py b/DJAGEN/branches/mustafa_branch/djagen/captcha/conf/__init__.py
new file mode 100755
index 0000000..e69de29
diff --git a/DJAGEN/branches/mustafa_branch/djagen/captcha/conf/settings.py b/DJAGEN/branches/mustafa_branch/djagen/captcha/conf/settings.py
new file mode 100755
index 0000000..ddfe82f
--- /dev/null
+++ b/DJAGEN/branches/mustafa_branch/djagen/captcha/conf/settings.py
@@ -0,0 +1,49 @@
+import os
+from django.conf import settings
+
+CAPTCHA_FONT_PATH = getattr(settings,'CAPTCHA_FONT_PATH', os.path.normpath(os.path.join(os.path.dirname(__file__), '..', 'fonts/Vera.ttf')))
+CAPTCHA_FONT_SIZE = getattr(settings,'CAPTCHA_FONT_SIZE', 22)
+CAPTCHA_LETTER_ROTATION = getattr(settings, 'CAPTCHA_LETTER_ROTATION', (-35,35))
+CAPTCHA_BACKGROUND_COLOR = getattr(settings,'CAPTCHA_BACKGROUND_COLOR', '#ffffff')
+CAPTCHA_FOREGROUND_COLOR = getattr(settings,'CAPTCHA_FOREGROUND_COLOR', '#001100')
+CAPTCHA_CHALLENGE_FUNCT = getattr(settings,'CAPTCHA_CHALLENGE_FUNCT','captcha.helpers.random_char_challenge')
+CAPTCHA_NOISE_FUNCTIONS = getattr(settings,'CAPTCHA_NOISE_FUNCTIONS', ('captcha.helpers.noise_arcs','captcha.helpers.noise_dots',))
+CAPTCHA_FILTER_FUNCTIONS = getattr(settings,'CAPTCHA_FILTER_FUNCTIONS',('captcha.helpers.post_smooth',))
+CAPTCHA_WORDS_DICTIONARY = getattr(settings,'CAPTCHA_WORDS_DICTIONARY', '/usr/share/dict/words')
+CAPTCHA_FLITE_PATH = getattr(settings,'CAPTCHA_FLITE_PATH',None)
+CAPTCHA_TIMEOUT = getattr(settings, 'CAPTCHA_TIMEOUT', 5) # Minutes
+CAPTCHA_LENGTH = int(getattr(settings, 'CAPTCHA_LENGTH', 4)) # Chars
+CAPTCHA_IMAGE_BEFORE_FIELD = getattr(settings,'CAPTCHA_IMAGE_BEFORE_FIELD', True)
+CAPTCHA_DICTIONARY_MIN_LENGTH = getattr(settings,'CAPTCHA_DICTIONARY_MIN_LENGTH', 0)
+CAPTCHA_DICTIONARY_MAX_LENGTH = getattr(settings,'CAPTCHA_DICTIONARY_MAX_LENGTH', 99)
+if CAPTCHA_IMAGE_BEFORE_FIELD:
+    CAPTCHA_OUTPUT_FORMAT =
getattr(settings,'CAPTCHA_OUTPUT_FORMAT', u'%(image)s %(hidden_field)s %(text_field)s') +else: + CAPTCHA_OUTPUT_FORMAT = getattr(settings,'CAPTCHA_OUTPUT_FORMAT', u'%(hidden_field)s %(text_field)s %(image)s') + + +# Failsafe +if CAPTCHA_DICTIONARY_MIN_LENGTH > CAPTCHA_DICTIONARY_MAX_LENGTH: + CAPTCHA_DICTIONARY_MIN_LENGTH, CAPTCHA_DICTIONARY_MAX_LENGTH = CAPTCHA_DICTIONARY_MAX_LENGTH, CAPTCHA_DICTIONARY_MIN_LENGTH + + +def _callable_from_string(string_or_callable): + if callable(string_or_callable): + return string_or_callable + else: + return getattr(__import__( '.'.join(string_or_callable.split('.')[:-1]), {}, {}, ['']), string_or_callable.split('.')[-1]) + +def get_challenge(): + return _callable_from_string(CAPTCHA_CHALLENGE_FUNCT) + + +def noise_functions(): + if CAPTCHA_NOISE_FUNCTIONS: + return map(_callable_from_string, CAPTCHA_NOISE_FUNCTIONS) + return list() + +def filter_functions(): + if CAPTCHA_FILTER_FUNCTIONS: + return map(_callable_from_string, CAPTCHA_FILTER_FUNCTIONS) + return list() + diff --git a/DJAGEN/branches/mustafa_branch/djagen/captcha/fields.py b/DJAGEN/branches/mustafa_branch/djagen/captcha/fields.py new file mode 100755 index 0000000..7df0f03 --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/captcha/fields.py @@ -0,0 +1,81 @@ +from django.forms.fields import CharField, MultiValueField +from django.forms import ValidationError +from django.forms.widgets import TextInput, MultiWidget, HiddenInput +from django.utils.safestring import mark_safe +from django.utils.translation import ugettext_lazy as _ +from django.core.urlresolvers import reverse +from captcha.models import CaptchaStore +from captcha.conf import settings +from captcha.helpers import * +import datetime + +class CaptchaTextInput(MultiWidget): + def __init__(self,attrs=None): + widgets = ( + HiddenInput(attrs), + TextInput(attrs), + ) + + for key in ('image','hidden_field','text_field'): + if '%%(%s)s'%key not in settings.CAPTCHA_OUTPUT_FORMAT: + raise KeyError('All of %s must be present in your CAPTCHA_OUTPUT_FORMAT setting. 
Could not find %s' %(
+                ', '.join(['%%(%s)s'%k for k in ('image','hidden_field','text_field')]),
+                '%%(%s)s'%key
+            ))
+
+        super(CaptchaTextInput,self).__init__(widgets,attrs)
+
+    def decompress(self,value):
+        if value:
+            return value.split(',')
+        return [None,None]
+
+    def format_output(self, rendered_widgets):
+        hidden_field, text_field = rendered_widgets
+        return settings.CAPTCHA_OUTPUT_FORMAT %dict(image=self.image_and_audio, hidden_field=hidden_field, text_field=text_field)
+
+    def render(self, name, value, attrs=None):
+        challenge,response = settings.get_challenge()()
+
+        store = CaptchaStore.objects.create(challenge=challenge,response=response)
+        key = store.hashkey
+        value = [key, u'']
+
+        self.image_and_audio = '<img src="%s" alt="captcha" class="captcha" />' %reverse('captcha-image',kwargs=dict(key=key))
+        if settings.CAPTCHA_FLITE_PATH:
+            self.image_and_audio = '<a href="%s" title="%s">%s</a>' %( reverse('captcha-audio', kwargs=dict(key=key)), unicode(_('Play captcha as audio file')), self.image_and_audio)
+        #fields = super(CaptchaTextInput, self).render(name, value, attrs=attrs)
+
+        return super(CaptchaTextInput, self).render(name, value, attrs=attrs)
+
+class CaptchaField(MultiValueField):
+    widget = CaptchaTextInput
+
+    def __init__(self, *args,**kwargs):
+        fields = (
+            CharField(show_hidden_initial=True),
+            CharField(),
+        )
+        if 'error_messages' not in kwargs or 'invalid' not in kwargs.get('error_messages'):
+            if 'error_messages' not in kwargs:
+                kwargs['error_messages'] = dict()
+            kwargs['error_messages'].update(dict(invalid=_('Invalid CAPTCHA')))
+
+        super(CaptchaField,self).__init__(fields=fields, *args, **kwargs)
+
+    def compress(self,data_list):
+        if data_list:
+            return ','.join(data_list)
+        return None
+
+    def clean(self, value):
+        super(CaptchaField, self).clean(value)
+        response, value[1] = value[1].strip().lower(), ''
+        CaptchaStore.remove_expired()
+        try:
+            store = CaptchaStore.objects.get(response=response, hashkey=value[0], expiration__gt=datetime.datetime.now())
+            store.delete()
+        except Exception:
+            raise ValidationError(getattr(self,'error_messages',dict()).get('invalid', _('Invalid CAPTCHA')))
+        return value
diff --git a/DJAGEN/branches/mustafa_branch/djagen/captcha/fonts/COPYRIGHT.TXT b/DJAGEN/branches/mustafa_branch/djagen/captcha/fonts/COPYRIGHT.TXT
new file mode 100755
index 0000000..e651be1
--- /dev/null
+++ b/DJAGEN/branches/mustafa_branch/djagen/captcha/fonts/COPYRIGHT.TXT
@@ -0,0 +1,124 @@
+Bitstream Vera Fonts Copyright
+
+The fonts have a generous copyright, allowing derivative works (as
+long as "Bitstream" or "Vera" are not in the names), and full
+redistribution (so long as they are not *sold* by themselves). They
+can be be bundled, redistributed and sold with any software.
+
+The fonts are distributed under the following copyright:
+
+Copyright
+=========
+
+Copyright (c) 2003 by Bitstream, Inc. All Rights Reserved. Bitstream
+Vera is a trademark of Bitstream, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the fonts accompanying this license ("Fonts") and associated
+documentation files (the "Font Software"), to reproduce and distribute
+the Font Software, including without limitation the rights to use,
+copy, merge, publish, distribute, and/or sell copies of the Font
+Software, and to permit persons to whom the Font Software is furnished
+to do so, subject to the following conditions:
+
+The above copyright and trademark notices and this permission notice
+shall be included in all copies of one or more of the Font Software
+typefaces.
+ +The Font Software may be modified, altered, or added to, and in +particular the designs of glyphs or characters in the Fonts may be +modified and additional glyphs or characters may be added to the +Fonts, only if the fonts are renamed to names not containing either +the words "Bitstream" or the word "Vera". + +This License becomes null and void to the extent applicable to Fonts +or Font Software that has been modified and is distributed under the +"Bitstream Vera" names. + +The Font Software may be sold as part of a larger software package but +no copy of one or more of the Font Software typefaces may be sold by +itself. + +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL +BITSTREAM OR THE GNOME FOUNDATION BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, +OR CONSEQUENTIAL DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF THE USE OR INABILITY TO USE THE FONT +SOFTWARE OR FROM OTHER DEALINGS IN THE FONT SOFTWARE. + +Except as contained in this notice, the names of Gnome, the Gnome +Foundation, and Bitstream Inc., shall not be used in advertising or +otherwise to promote the sale, use or other dealings in this Font +Software without prior written authorization from the Gnome Foundation +or Bitstream Inc., respectively. For further information, contact: +fonts at gnome dot org. + +Copyright FAQ +============= + + 1. I don't understand the resale restriction... What gives? + + Bitstream is giving away these fonts, but wishes to ensure its + competitors can't just drop the fonts as is into a font sale system + and sell them as is. It seems fair that if Bitstream can't make money + from the Bitstream Vera fonts, their competitors should not be able to + do so either. You can sell the fonts as part of any software package, + however. + + 2. I want to package these fonts separately for distribution and + sale as part of a larger software package or system. Can I do so? + + Yes. A RPM or Debian package is a "larger software package" to begin + with, and you aren't selling them independently by themselves. + See 1. above. + + 3. Are derivative works allowed? + Yes! + + 4. Can I change or add to the font(s)? + Yes, but you must change the name(s) of the font(s). + + 5. Under what terms are derivative works allowed? + + You must change the name(s) of the fonts. This is to ensure the + quality of the fonts, both to protect Bitstream and Gnome. We want to + ensure that if an application has opened a font specifically of these + names, it gets what it expects (though of course, using fontconfig, + substitutions could still could have occurred during font + opening). You must include the Bitstream copyright. Additional + copyrights can be added, as per copyright law. Happy Font Hacking! + + 6. If I have improvements for Bitstream Vera, is it possible they might get + adopted in future versions? + + Yes. The contract between the Gnome Foundation and Bitstream has + provisions for working with Bitstream to ensure quality additions to + the Bitstream Vera font family. Please contact us if you have such + additions. 
Note, that in general, we will want such additions for the + entire family, not just a single font, and that you'll have to keep + both Gnome and Jim Lyles, Vera's designer, happy! To make sense to add + glyphs to the font, they must be stylistically in keeping with Vera's + design. Vera cannot become a "ransom note" font. Jim Lyles will be + providing a document describing the design elements used in Vera, as a + guide and aid for people interested in contributing to Vera. + + 7. I want to sell a software package that uses these fonts: Can I do so? + + Sure. Bundle the fonts with your software and sell your software + with the fonts. That is the intent of the copyright. + + 8. If applications have built the names "Bitstream Vera" into them, + can I override this somehow to use fonts of my choosing? + + This depends on exact details of the software. Most open source + systems and software (e.g., Gnome, KDE, etc.) are now converting to + use fontconfig (see www.fontconfig.org) to handle font configuration, + selection and substitution; it has provisions for overriding font + names and subsituting alternatives. An example is provided by the + supplied local.conf file, which chooses the family Bitstream Vera for + "sans", "serif" and "monospace". Other software (e.g., the XFree86 + core server) has other mechanisms for font substitution. + diff --git a/DJAGEN/branches/mustafa_branch/djagen/captcha/fonts/README.TXT b/DJAGEN/branches/mustafa_branch/djagen/captcha/fonts/README.TXT new file mode 100755 index 0000000..0f71795 --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/captcha/fonts/README.TXT @@ -0,0 +1,11 @@ +Contained herin is the Bitstream Vera font family. + +The Copyright information is found in the COPYRIGHT.TXT file (along +with being incoporated into the fonts themselves). + +The releases notes are found in the file "RELEASENOTES.TXT". + +We hope you enjoy Vera! + + Bitstream, Inc. 
+ The Gnome Project diff --git a/DJAGEN/branches/mustafa_branch/djagen/captcha/fonts/Vera.ttf b/DJAGEN/branches/mustafa_branch/djagen/captcha/fonts/Vera.ttf new file mode 100755 index 0000000..58cd6b5 Binary files /dev/null and b/DJAGEN/branches/mustafa_branch/djagen/captcha/fonts/Vera.ttf differ diff --git a/DJAGEN/branches/mustafa_branch/djagen/captcha/helpers.py b/DJAGEN/branches/mustafa_branch/djagen/captcha/helpers.py new file mode 100755 index 0000000..b400700 --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/captcha/helpers.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +import random +from captcha.conf import settings + +def math_challenge(): + operators = ('+','*','-',) + operands = (random.randint(1,10),random.randint(1,10)) + operator = random.choice(operators) + if operands[0] < operands[1] and '-' == operator: + operands = (operands[1],operands[0]) + challenge = '%d%s%d' %(operands[0],operator,operands[1]) + return u'%s=' %(challenge), unicode(eval(challenge)) + +def random_char_challenge(): + chars,ret = u'abcdefghijklmnopqrstuvwxyz', u'' + for i in range(settings.CAPTCHA_LENGTH): + ret += random.choice(chars) + return ret.upper(),ret + +def unicode_challenge(): + chars,ret = u'äàáëéèïíîöóòüúù', u'' + for i in range(settings.CAPTCHA_LENGTH): + ret += random.choice(chars) + return ret.upper(), ret + +def word_challenge(): + fd = file(settings.CAPTCHA_WORDS_DICTIONARY,'rb') + l = fd.readlines() + fd.close() + while True: + word = random.choice(l).strip() + if len(word) >= settings.CAPTCHA_DICTIONARY_MIN_LENGTH and len(word) <= settings.CAPTCHA_DICTIONARY_MAX_LENGTH: + break + return word.upper(), word.lower() + +def noise_arcs(draw,image): + size = image.size + draw.arc([-20,-20, size[0],20], 0, 295, fill=settings.CAPTCHA_FOREGROUND_COLOR) + draw.line([-20,20, size[0]+20,size[1]-20], fill=settings.CAPTCHA_FOREGROUND_COLOR) + draw.line([-20,0, size[0]+20,size[1]], fill=settings.CAPTCHA_FOREGROUND_COLOR) + return draw + +def noise_dots(draw,image): + size = image.size + for p in range(int(size[0]*size[1]*0.1)): + draw.point((random.randint(0, size[0]),random.randint(0, size[1])), fill=settings.CAPTCHA_FOREGROUND_COLOR ) + return draw + +def post_smooth(image): + import ImageFilter + return image.filter(ImageFilter.SMOOTH) diff --git a/DJAGEN/branches/mustafa_branch/djagen/captcha/management/__init__.py b/DJAGEN/branches/mustafa_branch/djagen/captcha/management/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/DJAGEN/branches/mustafa_branch/djagen/captcha/management/commands/__init__.py b/DJAGEN/branches/mustafa_branch/djagen/captcha/management/commands/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/DJAGEN/branches/mustafa_branch/djagen/captcha/management/commands/captcha_clean.py b/DJAGEN/branches/mustafa_branch/djagen/captcha/management/commands/captcha_clean.py new file mode 100755 index 0000000..9a66e48 --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/captcha/management/commands/captcha_clean.py @@ -0,0 +1,28 @@ +from django.core.management.base import BaseCommand, CommandError +import sys + +from optparse import make_option + +class Command(BaseCommand): + help = "Clean up expired captcha hashkeys." 
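+    # Typically run on a schedule, e.g. from cron: python manage.py captcha_clean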
+ + def handle(self, **options): + from captcha.models import CaptchaStore + import datetime + verbose = int(options.get('verbosity')) + expired_keys = CaptchaStore.objects.filter(expiration__lte=datetime.datetime.now()).count() + if verbose >= 1: + print "Currently %s expired hashkeys" % expired_keys + try: + CaptchaStore.remove_expired() + except: + if verbose >= 1 : + print "Unable to delete expired hashkeys." + sys.exit(1) + if verbose >= 1: + if expired_keys > 0: + print "Expired hashkeys removed." + else: + print "No keys to remove." + + diff --git a/DJAGEN/branches/mustafa_branch/djagen/captcha/models.py b/DJAGEN/branches/mustafa_branch/djagen/captcha/models.py new file mode 100755 index 0000000..fc8c599 --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/captcha/models.py @@ -0,0 +1,46 @@ +from django.db import models +from captcha.conf import settings as captcha_settings +import datetime, unicodedata, random, time + +# Heavily based on session key generation in Django +# Use the system (hardware-based) random number generator if it exists. +if hasattr(random, 'SystemRandom'): + randrange = random.SystemRandom().randrange +else: + randrange = random.randrange +MAX_RANDOM_KEY = 18446744073709551616L # 2 << 63 + + +try: + import hashlib # sha for Python 2.5+ +except ImportError: + import sha # sha for Python 2.4 (deprecated in Python 2.6) + hashlib = False + +class CaptchaStore(models.Model): + challenge = models.CharField(blank=False, max_length=32) + response = models.CharField(blank=False, max_length=32) + hashkey = models.CharField(blank=False, max_length=40, unique=True) + expiration = models.DateTimeField(blank=False) + + def save(self,*args,**kwargs): + self.response = self.response.lower() + if not self.expiration: + self.expiration = datetime.datetime.now() + datetime.timedelta(minutes= int(captcha_settings.CAPTCHA_TIMEOUT)) + if not self.hashkey: + key_ = unicodedata.normalize('NFKD', str(randrange(0,MAX_RANDOM_KEY)) + str(time.time()) + unicode(self.challenge)).encode('ascii', 'ignore') + unicodedata.normalize('NFKD', unicode(self.response)).encode('ascii', 'ignore') + if hashlib: + self.hashkey = hashlib.new('sha', key_).hexdigest() + else: + self.hashkey = sha.new(key_).hexdigest() + del(key_) + super(CaptchaStore,self).save(*args,**kwargs) + + def __unicode__(self): + return self.challenge + + + def remove_expired(cls): + cls.objects.filter(expiration__lte=datetime.datetime.now()).delete() + remove_expired = classmethod(remove_expired) + diff --git a/DJAGEN/branches/mustafa_branch/djagen/captcha/tests/__init__.py b/DJAGEN/branches/mustafa_branch/djagen/captcha/tests/__init__.py new file mode 100755 index 0000000..ded5948 --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/captcha/tests/__init__.py @@ -0,0 +1,153 @@ +# -*- coding: utf-8 -*- +from captcha.conf import settings +from captcha.models import CaptchaStore +from django.core.urlresolvers import reverse +from django.test import TestCase +from django.utils.translation import ugettext_lazy as _ +import datetime + + +class CaptchaCase(TestCase): + urls = 'captcha.tests.urls' + + def setUp(self): + self.default_challenge = settings.get_challenge()() + self.math_challenge = settings._callable_from_string('captcha.helpers.math_challenge')() + self.chars_challenge = settings._callable_from_string('captcha.helpers.random_char_challenge')() + self.unicode_challenge = settings._callable_from_string('captcha.helpers.unicode_challenge')() + + self.default_store, created = 
CaptchaStore.objects.get_or_create(challenge=self.default_challenge[0],response=self.default_challenge[1]) + self.math_store, created = CaptchaStore.objects.get_or_create(challenge=self.math_challenge[0],response=self.math_challenge[1]) + self.chars_store, created = CaptchaStore.objects.get_or_create(challenge=self.chars_challenge[0],response=self.chars_challenge[1]) + self.unicode_store, created = CaptchaStore.objects.get_or_create(challenge=self.unicode_challenge[0],response=self.unicode_challenge[1]) + + + + + def testImages(self): + for key in (self.math_store.hashkey, self.chars_store.hashkey, self.default_store.hashkey, self.unicode_store.hashkey): + response = self.client.get(reverse('captcha-image',kwargs=dict(key=key))) + self.failUnlessEqual(response.status_code, 200) + self.assertTrue(response.has_header('content-type')) + self.assertEquals(response._headers.get('content-type'), ('Content-Type', 'image/png')) + + def testAudio(self): + if not settings.CAPTCHA_FLITE_PATH: + return + for key in (self.math_store.hashkey, self.chars_store.hashkey, self.default_store.hashkey, self.unicode_store.hashkey): + response = self.client.get(reverse('captcha-audio',kwargs=dict(key=key))) + self.failUnlessEqual(response.status_code, 200) + self.assertTrue(len(response.content) > 1024) + self.assertTrue(response.has_header('content-type')) + self.assertEquals(response._headers.get('content-type'), ('Content-Type', 'audio/x-wav')) + + def testFormSubmit(self): + r = self.client.get(reverse('captcha-test')) + self.failUnlessEqual(r.status_code, 200) + hash_ = r.content[r.content.find('value="')+7:r.content.find('value="')+47] + try: + response = CaptchaStore.objects.get(hashkey=hash_).response + except: + self.fail() + + r = self.client.post(reverse('captcha-test'), dict(captcha_0=hash_,captcha_1=response, subject='xxx', sender='asasd@asdasd.com')) + self.failUnlessEqual(r.status_code, 200) + self.assertTrue(r.content.find('Form validated') > 0) + + r = self.client.post(reverse('captcha-test'), dict(captcha_0=hash_,captcha_1=response, subject='xxx', sender='asasd@asdasd.com')) + self.failUnlessEqual(r.status_code, 200) + self.assertFalse(r.content.find('Form validated') > 0) + + + + def testWrongSubmit(self): + r = self.client.get(reverse('captcha-test')) + self.failUnlessEqual(r.status_code, 200) + r = self.client.post(reverse('captcha-test'), dict(captcha_0='abc',captcha_1='wrong response', subject='xxx', sender='asasd@asdasd.com')) + self.assertFormError(r,'form','captcha',_('Invalid CAPTCHA')) + + def testDeleteExpired(self): + self.default_store.expiration = datetime.datetime.now() - datetime.timedelta(minutes=5) + self.default_store.save() + hash_ = self.default_store.hashkey + r = self.client.post(reverse('captcha-test'), dict(captcha_0=hash_,captcha_1=self.default_store.response, subject='xxx', sender='asasd@asdasd.com')) + + self.failUnlessEqual(r.status_code, 200) + self.assertFalse(r.content.find('Form validated') > 0) + + # expired -> deleted + try: + CaptchaStore.objects.get(hashkey=hash_) + self.fail() + except: + pass + + def testCustomErrorMessage(self): + r = self.client.get(reverse('captcha-test-custom-error-message')) + self.failUnlessEqual(r.status_code, 200) + + # Wrong answer + r = self.client.post(reverse('captcha-test-custom-error-message'), dict(captcha_0='abc',captcha_1='wrong response')) + self.assertFormError(r,'form','captcha','TEST CUSTOM ERROR MESSAGE') + # empty answer + r = self.client.post(reverse('captcha-test-custom-error-message'), 
dict(captcha_0='abc',captcha_1='')) + self.assertFormError(r,'form','captcha',_('This field is required.')) + + def testRepeatedChallenge(self): + store = CaptchaStore.objects.create(challenge='xxx',response='xxx') + try: + store2 = CaptchaStore.objects.create(challenge='xxx',response='xxx') + except Exception: + self.fail() + + + def testRepeatedChallengeFormSubmit(self): + settings.CAPTCHA_CHALLENGE_FUNCT = 'captcha.tests.trivial_challenge' + + r1 = self.client.get(reverse('captcha-test')) + r2 = self.client.get(reverse('captcha-test')) + self.failUnlessEqual(r1.status_code, 200) + self.failUnlessEqual(r2.status_code, 200) + hash_1 = r1.content[r1.content.find('value="')+7:r1.content.find('value="')+47] + hash_2 = r2.content[r2.content.find('value="')+7:r2.content.find('value="')+47] + try: + store_1 = CaptchaStore.objects.get(hashkey=hash_1) + store_2 = CaptchaStore.objects.get(hashkey=hash_2) + except: + self.fail() + + self.assertTrue(store_1.pk != store_2.pk) + self.assertTrue(store_1.response == store_2.response) + self.assertTrue(hash_1 != hash_2) + + + + r1 = self.client.post(reverse('captcha-test'), dict(captcha_0=hash_1,captcha_1=store_1.response, subject='xxx', sender='asasd@asdasd.com')) + self.failUnlessEqual(r1.status_code, 200) + self.assertTrue(r1.content.find('Form validated') > 0) + + try: + store_2 = CaptchaStore.objects.get(hashkey=hash_2) + except: + self.fail() + + r2 = self.client.post(reverse('captcha-test'), dict(captcha_0=hash_2,captcha_1=store_2.response, subject='xxx', sender='asasd@asdasd.com')) + self.failUnlessEqual(r2.status_code, 200) + self.assertTrue(r2.content.find('Form validated') > 0) + + def testOutputFormat(self): + settings.CAPTCHA_OUTPUT_FORMAT = u'%(image)s
<p>Hello, captcha world</p>
%(hidden_field)s%(text_field)s' + r = self.client.get(reverse('captcha-test')) + self.failUnlessEqual(r.status_code, 200) + self.assertTrue('
<p>Hello, captcha world</p>
' in r.content)
+
+    def testInvalidOutputFormat(self):
+        settings.CAPTCHA_OUTPUT_FORMAT = u'%(image)s'
+        try:
+            r = self.client.get(reverse('captcha-test'))
+            self.fail()
+        except KeyError:
+            pass
+
+def trivial_challenge():
+    return 'trivial','trivial'
diff --git a/DJAGEN/branches/mustafa_branch/djagen/captcha/tests/urls.py b/DJAGEN/branches/mustafa_branch/djagen/captcha/tests/urls.py
new file mode 100755
index 0000000..78b6ee3
--- /dev/null
+++ b/DJAGEN/branches/mustafa_branch/djagen/captcha/tests/urls.py
@@ -0,0 +1,6 @@
+from django.conf.urls.defaults import *
+urlpatterns = patterns('',
+    url(r'test/$','captcha.tests.views.test',name='captcha-test'),
+    url(r'test2/$','captcha.tests.views.test_custom_error_message',name='captcha-test-custom-error-message'),
+    url(r'',include('captcha.urls')),
+)
diff --git a/DJAGEN/branches/mustafa_branch/djagen/captcha/tests/views.py b/DJAGEN/branches/mustafa_branch/djagen/captcha/tests/views.py
new file mode 100755
index 0000000..8b836c1
--- /dev/null
+++ b/DJAGEN/branches/mustafa_branch/djagen/captcha/tests/views.py
@@ -0,0 +1,58 @@
+from django import forms
+from captcha.fields import CaptchaField
+from django.template import Context, RequestContext, loader
+from django.http import HttpResponse
+
+
+TEST_TEMPLATE = r'''
+<!DOCTYPE html>
+<html>
+<head>
+<title>captcha test</title>
+</head>
+<body>
+{% if passed %}
+<p style="color:green">Form validated</p>
+{% endif %}
+<form action="." method="post">
+{{form.as_p}}
+<p><input type="submit" value="Submit" /></p>
+</form>
+ + +''' + +def test(request): + + class CaptchaTestForm(forms.Form): + subject = forms.CharField(max_length=100) + sender = forms.EmailField() + captcha = CaptchaField(help_text='asdasd') + + if request.POST: + form = CaptchaTestForm(request.POST) + if form.is_valid(): + passed = True + else: + form = CaptchaTestForm() + + t = loader.get_template_from_string(TEST_TEMPLATE) + return HttpResponse(t.render(RequestContext(request, locals()))) + + +def test_custom_error_message(request): + + class CaptchaTestForm(forms.Form): + captcha = CaptchaField(help_text='asdasd', error_messages=dict(invalid='TEST CUSTOM ERROR MESSAGE')) + + if request.POST: + form = CaptchaTestForm(request.POST) + if form.is_valid(): + passed = True + else: + form = CaptchaTestForm() + + t = loader.get_template_from_string(TEST_TEMPLATE) + return HttpResponse(t.render(RequestContext(request, locals()))) diff --git a/DJAGEN/branches/mustafa_branch/djagen/captcha/urls.py b/DJAGEN/branches/mustafa_branch/djagen/captcha/urls.py new file mode 100755 index 0000000..c458668 --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/captcha/urls.py @@ -0,0 +1,6 @@ +from django.conf.urls.defaults import * + +urlpatterns = patterns('captcha.views', + url(r'image/(?P\w+)/$','captcha_image',name='captcha-image'), + url(r'audio/(?P\w+)/$','captcha_audio',name='captcha-audio'), +) diff --git a/DJAGEN/branches/mustafa_branch/djagen/captcha/views.py b/DJAGEN/branches/mustafa_branch/djagen/captcha/views.py new file mode 100755 index 0000000..fec51f7 --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/captcha/views.py @@ -0,0 +1,92 @@ +from cStringIO import StringIO +from captcha.models import CaptchaStore +from django.http import HttpResponse, Http404 +from django.shortcuts import get_object_or_404 +import Image,ImageDraw,ImageFont,ImageFilter,random +from captcha.conf import settings +import re + +NON_DIGITS_RX = re.compile('[^\d]') + +def captcha_image(request,key): + store = get_object_or_404(CaptchaStore,hashkey=key) + text=store.challenge + + if settings.CAPTCHA_FONT_PATH.lower().strip().endswith('ttf'): + font = ImageFont.truetype(settings.CAPTCHA_FONT_PATH,settings.CAPTCHA_FONT_SIZE) + else: + font = ImageFont.load(settings.CAPTCHA_FONT_PATH) + + size = font.getsize(text) + size = (size[0]*2,size[1]) + image = Image.new('RGB', size , settings.CAPTCHA_BACKGROUND_COLOR) + + try: + PIL_VERSION = int(NON_DIGITS_RX.sub('',Image.VERSION)) + except: + PIL_VERSION = 116 + + + + xpos = 2 + for char in text: + fgimage = Image.new('RGB', size, settings.CAPTCHA_FOREGROUND_COLOR) + charimage = Image.new('L', font.getsize(' %s '%char), '#000000') + chardraw = ImageDraw.Draw(charimage) + chardraw.text((0,0), ' %s '%char, font=font, fill='#ffffff') + if settings.CAPTCHA_LETTER_ROTATION: + if PIL_VERSION >= 116: + charimage = charimage.rotate(random.randrange( *settings.CAPTCHA_LETTER_ROTATION ), expand=0, resample=Image.BICUBIC) + else: + charimage = charimage.rotate(random.randrange( *settings.CAPTCHA_LETTER_ROTATION ), resample=Image.BICUBIC) + charimage = charimage.crop(charimage.getbbox()) + maskimage = Image.new('L', size) + + maskimage.paste(charimage, (xpos, 4, xpos+charimage.size[0], 4+charimage.size[1] )) + size = maskimage.size + image = Image.composite(fgimage, image, maskimage) + xpos = xpos + 2 + charimage.size[0] + + image = image.crop((0,0,xpos+1,size[1])) + draw = ImageDraw.Draw(image) + + for f in settings.noise_functions(): + draw = f(draw,image) + for f in settings.filter_functions(): + image = f(image) + + out = 
StringIO() + image.save(out,"PNG") + out.seek(0) + + response = HttpResponse() + response['Content-Type'] = 'image/png' + response.write(out.read()) + + return response + +def captcha_audio(request,key): + if settings.CAPTCHA_FLITE_PATH: + store = get_object_or_404(CaptchaStore,hashkey=key) + text=store.challenge + if 'captcha.helpers.math_challenge' == settings.CAPTCHA_CHALLENGE_FUNCT: + text = text.replace('*','times').replace('-','minus') + else: + text = ', '.join(list(text)) + + import tempfile, os + + path = str(os.path.join(tempfile.gettempdir(),'%s.wav' %key)) + cline = '%s -t "%s" -o "%s"' %(settings.CAPTCHA_FLITE_PATH, text, path) + + os.popen(cline).read() + if os.path.isfile(path): + response = HttpResponse() + f = open(path,'rb') + response['Content-Type'] = 'audio/x-wav' + response.write(f.read()) + f.close() + os.unlink(path) + return response + + raise Http404 diff --git a/DJAGEN/branches/mustafa_branch/djagen/collector/__init__.py b/DJAGEN/branches/mustafa_branch/djagen/collector/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/DJAGEN/branches/mustafa_branch/djagen/collector/admin.py b/DJAGEN/branches/mustafa_branch/djagen/collector/admin.py new file mode 100755 index 0000000..d2125b6 --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/collector/admin.py @@ -0,0 +1,74 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +from django.contrib import admin +from djagen.collector.models import * + +from django.conf import settings + +import os +import datetime +import shutil + +from djagen.collector.configini import * + +class AuthorsAdmin (admin.ModelAdmin): + + list_display = ('author_id', 'author_name', 'author_email', 'author_face', 'current_status', 'is_approved', 'label_personal', 'label_lkd', 'label_community', 'label_eng') + list_select_related = True + + search_fields = ['author_name', 'author_surname', 'author_email'] + + def save_model(self, request, obj, form, change): + + #get the values for saving + author_name = obj.author_name + author_surname = obj.author_surname + author_face = obj.author_face + channel_url = obj.channel_url + + current_status = obj.current_status + is_approved = obj.is_approved + + #creating the history + now = datetime.datetime.now() + action_type = current_status + + author_id = obj.author_id + if author_id: + #then this is an update + author = Authors.objects.get(author_id = author_id) + pre_status = author.is_approved + current_status = obj.is_approved + obj.save() + else: + obj.save() + author = Authors.objects.get(author_name=author_name, author_surname=author_surname, channel_url=channel_url) + pre_status = None + current_status = author.is_approved + + author.history_set.create(action_type=action_type, action_date=now, action_owner=request.user.username) + + + #create tmp_config.ini here + handler = Handler(author.author_id) + handler.create_tmp_entries() + + if pre_status != current_status: + a_face = author.author_face + + images_path = os.path.join(settings.MAIN_PATH, 'www', 'images') + heads_path = os.path.join(images_path, 'heads') + face_path = os.path.join(heads_path, a_face) + + tmp_image_path = os.path.join(settings.MAIN_PATH, 'temp_ini', a_face) + + if os.path.exists(tmp_image_path): + shutil.move(tmp_image_path, face_path) + +class HistoryAdmin(admin.ModelAdmin): + list_display = ('action_type', 'action_date', 'action_author', 'action_owner') + +admin.site.register(History, HistoryAdmin) +admin.site.register(Authors, AuthorsAdmin) + diff --git 
a/DJAGEN/branches/mustafa_branch/djagen/collector/configini.py b/DJAGEN/branches/mustafa_branch/djagen/collector/configini.py
new file mode 100755
index 0000000..af4f7ee
--- /dev/null
+++ b/DJAGEN/branches/mustafa_branch/djagen/collector/configini.py
@@ -0,0 +1,93 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+import os
+from django.conf import settings
+from djagen.collector.models import *
+import ConfigParser
+
+class Handler:
+
+    def __init__(self, id):
+
+        self.id = id
+
+        self.tmp_entries_ini = os.path.join(settings.MAIN_PATH, 'tmp_ini', 'tmp_entries.ini')
+
+        self.config_entries_ini = os.path.join(settings.MAIN_PATH, 'gezegen', 'config_entries.ini')
+
+    def __set_values(self):
+
+        author = Authors.objects.get(author_id = self.id)
+
+        if not author.is_approved:
+            return False
+
+        self.name = author.author_name + ' ' + author.author_surname
+        self.face = author.author_face
+        self.url = author.channel_url
+
+        # Map each config.ini label name to its boolean flag and keep the
+        # names whose flag is set.
+        labels = {'Personal': author.label_personal, 'LKD': author.label_lkd, 'Community': author.label_community, 'Eng': author.label_eng}
+
+        label_li = [k for k, v in labels.iteritems() if v]
+        self.author_labels = " ".join(label_li)
+
+        return True
+
+    def create_tmp_entries(self):
+
+        if not self.__set_values(): return
+
+        tmp_entries = open(self.tmp_entries_ini, 'w')
+
+        Config = ConfigParser.ConfigParser()
+        Config.read(self.config_entries_ini)
+        sections = Config.sections()
+
+        for section in sections:
+
+            config_name = Config.get(section, 'name')
+            config_label = Config.get(section, 'label')
+            config_id = Config.get(section, 'id')
+            config_url = section
+
+            try:
+                config_face = Config.get(section, 'face')
+            except:
+                config_face = None
+
+            # ConfigParser returns strings, so compare the ids as strings.
+            if str(config_id) == str(self.id):
+
+                url = self.url
+                face = self.face
+                name = self.name
+                label = self.author_labels
+                id = self.id
+
+            else:
+
+                url = config_url
+                face = config_face
+                name = config_name
+                label = config_label
+                id = config_id
+
+            s = url + '\n'
+            s += 'name = ' + name + '\n'
+            s += 'label = ' + label + '\n'
+            if face:
+                s += 'face = ' + face + '\n'
+            s += 'id = ' + str(id) + '\n' + '\n'
+
+            tmp_entries.write(s)
+
+        tmp_entries.close()
diff --git a/DJAGEN/branches/mustafa_branch/djagen/collector/configxml.py b/DJAGEN/branches/mustafa_branch/djagen/collector/configxml.py
new file mode 100755
index 0000000..e952792
--- /dev/null
+++ b/DJAGEN/branches/mustafa_branch/djagen/collector/configxml.py
@@ -0,0 +1,121 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+import os
+from xml.dom import minidom
+
+class Handler:
+
+    def __init__(self):
+
+        self.main_url = "/home/oguz/django-projects/djagen/gezegen"
+        self.gezegen_url = os.path.join(self.main_url,"gezegen")
+        self.entries_xml = os.path.join(self.gezegen_url, "config_entries.xml")
+        self.header_xml = os.path.join(self.gezegen_url, 'config_header.xml')
+        self.tmp_ini_dir_path = os.path.join(self.main_url, "tmp_ini")
+
+    def get_doc(self, type="entries"):
+
+        if type == "entries":
+            self.doc = minidom.parse(self.entries_xml)
+        else:
+            self.doc = minidom.parse(self.header_xml)
+        return self.doc
+
+    def get_tag_entries(self,tag):
+
+        self.entries = self.doc.getElementsByTagName(tag)
+        return self.entries
+
+    def set_ini_variables(self, id, name, feed, nick, face, label):
+
+        self.tmp_ini = {'id': id, 'name': name, 'feed': feed, 'nick': nick, 'face': face, 'label': label}
+
+    def open_file(self):
+        path = os.path.join(self.tmp_ini_dir_path, 'tmp.ini')
+        self.f = open(path, "w")
+
+    def create_header(self):
+
+        for header in self.entries:
+
+            children = header.childNodes
+            for child in children:
+                if child.nodeType == child.TEXT_NODE: continue
+                else:
+                    node_name = child.nodeName
+                    f_child = child.firstChild
+                    node_value = f_child.nodeValue
+
+                    if node_name != "header_name":
+                        s = [node_name, "=", node_value, "\n"]
+                        ss = " ".join(s)
+                        self.f.write(ss)
+
+    def traverse(self):
+
+        for entry in self.entries:
+
+            nodes = entry.childNodes
+
+            # face is optional, so reset it once per entry; resetting it on
+            # every node would discard a <face> value that was already read.
+            self.face = None
+
+            for node in nodes:
+
+                child = node.firstChild
+
+                if node.nodeType == node.TEXT_NODE: continue
+
+                if node.nodeName == "feed":
+                    self.feed = child.toxml()
+
+                if node.nodeName == "name":
+                    self.name = child.toxml()
+
+                if node.nodeName == "nick":
+                    self.nick = child.toxml()
+
+                if node.nodeName == "label":
+                    self.label = child.toxml()
+
+                if node.nodeName == "face":
+                    self.face = child.toxml()
+
+                if node.nodeName == "id":
+                    self.id = child.toxml()
+
+            if int(self.tmp_ini['id']) == int(self.id):
+                self.write_to_file(self.tmp_ini)
+            else:
+                config = {'id': self.id, 'name': self.name, 'feed': self.feed, 'nick': self.nick, 'label': self.label, 'face': self.face}
+                self.write_to_file(config)
+
+    def write_to_file(self, dic):
+
+        feed = "feed = " + dic['feed'] + "\n"
+        name = "name = " + dic['name'] + "\n"
+        nick = "nick = " + dic['nick'] + "\n"
+        label = "label = " + dic['label'] + "\n"
+        id = "id = " + dic['id'] + "\n"
+
+        self.f.write("\n")
+        self.f.write(feed)
+        self.f.write(name)
+        self.f.write(nick)
+        if dic['face']:
+            face = "face = " + dic['face'] + "\n"
+            self.f.write(face)
+        self.f.write(label)
+        self.f.write(id)
+
+    def close_file(self):
+        self.f.close()
diff --git a/DJAGEN/branches/mustafa_branch/djagen/collector/forms.py b/DJAGEN/branches/mustafa_branch/djagen/collector/forms.py
new file mode 100755
index 0000000..e15bf4e
--- /dev/null
+++ b/DJAGEN/branches/mustafa_branch/djagen/collector/forms.py
@@ -0,0 +1,21 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+from django import forms
+from captcha.fields import CaptchaField
+
+class ContactForm(forms.Form):
+
+    name = forms.CharField(max_length=25, required=True, error_messages={'required': 'Lütfen adınızı giriniz'}, label='Adınız')
+    surname = forms.CharField(max_length=25, required=True, error_messages={'required': 'Lütfen soyadınızı giriniz'}, label='Soyadınız')
+    email = forms.EmailField(required=True, error_messages={'required': 'Size ulaşabileceğimiz eposta adresinizi giriniz'}, label='Eposta Adresiniz')
+    hackergotchi = forms.FileField(required=False, label='Hackergotchiniz', help_text='En fazla 80*80 piksellik, Gezegende görünmesini istediğiniz fotoğrafınız')
+    feed = forms.URLField(required=True, label='Besleme adresiniz', help_text='Günlüğünüzün XML kaynağının adresi')
+    message = forms.CharField(required=False, label='İletişim Mesajınız', widget=forms.widgets.Textarea())
+    #field for captcha
+    captcha = CaptchaField(label="Captcha Alanı", help_text='Gördüğünüz karakterleri aynen yazınız', error_messages={'required': 'Hatalı yazdınız!'})
+
+class QueryForm(forms.Form):
+    name = forms.CharField(max_length=25, required = False, label = 'Adı')
+    surname = forms.CharField(max_length=25, required = False, label = 'Soyadı')
+    text = forms.CharField(required = False, label = 'Aradığınız metin', widget = forms.widgets.Textarea() )
diff --git a/DJAGEN/branches/mustafa_branch/djagen/collector/models.py b/DJAGEN/branches/mustafa_branch/djagen/collector/models.py
new file mode 100755
index 0000000..eee5269
--- /dev/null
+++ b/DJAGEN/branches/mustafa_branch/djagen/collector/models.py
@@ -0,0 +1,111 @@
+from django.db import models
+import datetime, unicodedata, random, time
+import re
+
+# Create your models here.
+ACTION_CHOICES = (
+    (1, u'Removed'),
+    (2, u'Approved'),
+    (3, u'Paused'),
+    (4, u'Readded'),
+    (5, u'Applied'),
+    (6, u'Edited')
+)
+
+class Authors (models.Model):
+    author_id = models.AutoField(primary_key=True, help_text="Author ID")
+    author_name = models.CharField(max_length=50, help_text="Author Name")
+    author_surname = models.CharField(max_length=50, help_text="Author Surname")
+    #we dont keep emails at the config.ini files, this part should be entered at the admin page
+    author_email = models.EmailField(null=True, blank=True, help_text="Author Email Address")
+    #the png file name of the author
+    author_face = models.CharField(max_length=30, null=True, blank=True, help_text="Author Face Name")
+    channel_subtitle = models.TextField(null=True, blank=True, help_text="Channel Subtitle")
+    channel_title = models.TextField(null=True, blank=True, help_text="Channel Title")
+    #URL of the feed.
+    channel_url = models.URLField(help_text="Channel URL")
+    #Link to the original format feed
+    channel_link = models.URLField(null=True, blank=True, help_text="Channel Link")
+    channel_urlstatus = models.IntegerField(null=True, blank=True, help_text="Channel URL Status")
+
+    #use this field to check whether the author is shown on the planet or not, like banned situations
+    current_status = models.SmallIntegerField(default=2, choices=ACTION_CHOICES, help_text="Current Status of the Author")
+    #whether the application to the planet is approved, the approved ones will be shown at the planet
+    is_approved = models.BooleanField(default=1, help_text="Approve Status of the Author")
+
+    #planets that the channel belongs to
+    #at the config.ini the entries should be one of the below:
+    #label = Personal
+    #label = LKD
+    #label = Eng
+    #label = Community
+    label_personal = models.BooleanField(default=1, help_text="Channels at the Personal Blog Page")
+    label_lkd = models.BooleanField(default=0, help_text="Channels that belong to LKD Blogs")
+    label_community = models.BooleanField(default=0, help_text="Channels that belong to some community blogs")
+    label_eng = models.BooleanField(default=0, help_text="Channels that have English entries")
+    #at the main page, lets just show personal and lkd for now, for communities lets ask them a special rss
+
+    def __unicode__(self):
+        return u'%s %s' % (self.author_name, self.author_surname)
+
+    class Meta:
+        #order according to the author_name, ascending
+        ordering = ['author_name']
+
+# keep the history for the actions that are done on the member urls
+class History (models.Model):
+    action_type = models.SmallIntegerField(choices=ACTION_CHOICES)
+    action_date = models.DateTimeField()
+    action_explanation = models.TextField(help_text="Reason of Action", blank=True, null=True)
+    action_author = models.ForeignKey('Authors')
+    action_owner = models.CharField(max_length=20, help_text="The user who did the action")
+
+    def __unicode__(self):
+        return unicode(self.action_type)
+
+    class Meta:
+        #order descending, show the last actions at top
+        ordering = ['-action_date']
+
+class Entries (models.Model):
+    id_hash = models.CharField(max_length=50, help_text="Hash of the ID", primary_key=True)
+    title = models.CharField(max_length=150, help_text="Entry Title")
+    content_html = models.TextField(help_text="Entry Original Content")
+    content_text = models.TextField(help_text="Entry Pure Text Content")
+    summary = models.TextField(help_text="Entry Summary", null=True, blank=True)
+    link = models.URLField(help_text="Link to Entry")
+    date = models.DateTimeField(help_text="Date of the entry")
+    entry_id = models.ForeignKey('Authors')
+
+    def __unicode__(self):
+        return self.title
+
+    class Meta:
+        ordering = ['-date']
+
+    def sanitize(self, data):
+        p = re.compile(r'<[^<]*?/?>')
+        return p.sub('', data)
+
+class RunTime (models.Model):
+    run_time = models.DateTimeField(help_text="Run time of the planet script", auto_now=True)
+
+    def __unicode__(self):
+        # __unicode__ must return a string, not a datetime object
+        return unicode(self.run_time)
+
+    class Meta:
+        ordering = ['-run_time']
+
+    def get_run_time(self):
+        dt = ".".join(map(lambda x: str(x), [self.run_time.day, self.run_time.month, self.run_time.year]))
+        hm = ":".join(map(lambda x: str(x), [self.run_time.hour, self.run_time.minute]))
+
+        rslt = " ".join([dt, hm])
+        return rslt
diff --git a/DJAGEN/branches/mustafa_branch/djagen/collector/views.py b/DJAGEN/branches/mustafa_branch/djagen/collector/views.py
new file mode 100755
index 0000000..cd551ac
--- /dev/null
+++ b/DJAGEN/branches/mustafa_branch/djagen/collector/views.py
@@ -0,0 +1,232 @@
+# -*- coding: utf-8 -*-
+
+# View definitions are created here.
+from django.shortcuts import render_to_response
+from django.http import HttpResponse,HttpResponseRedirect
+from djagen.collector.models import *
+from djagen.collector.forms import ContactForm, QueryForm
+from djagen.collector.wrappers import render_response
+from django.conf import settings
+from django.utils.datastructures import MultiValueDictKeyError
+import magic
+import os
+import datetime, time
+from django.core.paginator import Paginator, EmptyPage, InvalidPage
+
+import string
+
+BASE_URL = settings.BASE_URL
+
+def main(request):
+    selected_entries = Entries.objects.select_related()
+    entries_list1 = selected_entries.filter(entry_id__label_personal = 1)
+    entries_list2 = selected_entries.filter(entry_id__label_lkd = 1)
+    entries_list3 = selected_entries.filter(entry_id__label_community = 1)
+    entries_list = entries_list1 | entries_list2 | entries_list3
+
+    # Entries whose content runs past truncate_words words get truncated
+    # to that many words in the template.
+    truncate_words = 250
+    items_per_page = 25
+
+    #get the last run time
+    run_time = RunTime.objects.all()[0]
+
+    #get the last entries' date
+    last_entry_date = Entries.objects.all()[0].date
+    day = datetime.timedelta(days=1)
+    last_date_li = []
+    for x in xrange(6):
+        last_entry_date -= day
+        last_date_li.append(last_entry_date)
+
+    return render_to_response('main/main.html' ,{
+        'entries_list':entries_list,
+        'truncate_words':truncate_words,
+        'items_per_page':repr(items_per_page),
+        'run_time':run_time,
+        #'pag_entries_list':pag_entries_list,
+        'BASE_URL': BASE_URL,
+        'last_date_li': last_date_li,
+    })
+
+def member_subscribe(request):
+    if request.method == 'POST':
+        form = ContactForm(request.POST, request.FILES)
+        #return HttpResponse(str(request.FILES))
+        if form.is_valid():
+            human = True
+            try:
+                check = handle_uploaded_file(request.FILES['hackergotchi'])
+            except MultiValueDictKeyError:
+                check = (False, '')
+
+            #save the author information
+            if check[0]:
+                f = request.FILES['hackergotchi']
+
+                #change the name of the file with the unique name created
+                f.name = check[1]
+
+                author = Authors(author_name=request.POST['name'], author_surname=request.POST['surname'], author_email=request.POST['email'], channel_url=request.POST['feed'], author_face=f.name, is_approved=0, current_status=5)
+            else:
+                author = Authors(author_name=request.POST['name'], author_surname=request.POST['surname'], author_email=request.POST['email'], channel_url=request.POST['feed'], is_approved=0, current_status=5)
+            author.save()
+
+            #save the history with explanation
+            author.history_set.create(action_type=5, action_date=datetime.datetime.now(), action_explanation=request.POST['message'])
+            #send mail part
+            #fill it here
+            return render_response(request, 'main/subscribe.html',{'submit': 'done', 'BASE_URL': BASE_URL})
+    else:
+        form = ContactForm()
+    return render_response(request, 'main/subscribe.html', {'form': form, 'BASE_URL': BASE_URL})
+
+def handle_uploaded_file(f):
+
+    if not f.name:
+        return (False, '')
+    #create a unique name for the image: <timestamp>.<original extension>
+    t = str(time.time()).split(".")
+    img_name = t[0] + t[1] + '.' + f.name.split(".")[-1]
+    f.name = img_name
+    path = os.path.join(settings.FILE_UPLOAD_TEMP_DIR, f.name)
+
+    destination = open(path, 'wb+')
+    for chunk in f.chunks():
+        destination.write(chunk)
+    destination.close()
+
+    m = magic.open(magic.MAGIC_MIME)
+    m.load()
+    t = m.file(path)
+    if t.split('/')[0] == 'image':
+        return (True, f.name)
+    else:
+        os.unlink(path)
+        return (False, '')
+
+def list_members(request):
+
+    authors = Authors.objects.all()
+
+    return render_response(request, 'main/members.html', {'members': authors, 'BASE_URL': BASE_URL})
+
+def query(request):
+
+    return render_response(request,'main/query.html',{'BASE_URL' : BASE_URL})
+
+def archive(request,archive_year='',archive_month='',archive_day=''):
+
+    # Entries whose content runs past truncate_words words get truncated
+    # to that many words in the template.
+    truncate_words = 250
+    items_per_page = 25
+
+    #get the last run time
+    run_time = RunTime.objects.all()[0]
+
+    ### Determine if the request includes any query or not. ###
+    try:
+        does_getPage_exists = request.GET['page']
+    except:
+        does_getPage_exists = None
+
+    if ( (request.GET) and ( not( does_getPage_exists) )):
+        # Switch to 'return the result of query' mode.
+
+        #Querying
+        #TODO: We should improve the querying method here.
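+        # A possible tightening (hypothetical sketch, not wired in): build one
+        # Q filter instead of the try/except accumulation below.
+        #   from django.db.models import Q
+        #   q = Q()
+        #   if request.GET.get('q_author_name'):
+        #       q |= Q(entry_id__author_name__icontains=request.GET['q_author_name'])
+        #   if request.GET.get('q_author_surname'):
+        #       q |= Q(entry_id__author_surname__icontains=request.GET['q_author_surname'])
+        #   if request.GET.get('q_text'):
+        #       q |= Q(content_text__icontains=request.GET['q_text'])
+        #   entries_list = Entries.objects.filter(q)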
+        if ( ('q_author_name' in request.GET) and (request.GET['q_author_name'] )):
+            for item in Authors.objects.filter(author_name__icontains = request.GET['q_author_name']):
+                try:
+                    entries_list |= item.entries_set.all()
+                except:
+                    entries_list = item.entries_set.all()
+
+        if (('q_author_surname' in request.GET) and (request.GET['q_author_surname'])):
+            for item in Authors.objects.filter(author_surname__icontains = request.GET['q_author_surname']):
+                try:
+                    entries_list |= item.entries_set.all()
+                except:
+                    entries_list = item.entries_set.all()
+
+        if( ('q_text' in request.GET)and(request.GET['q_text'])):
+            try:
+                entries_list |= Entries.objects.filter(content_text__icontains = request.GET['q_text'])
+            except:
+                entries_list = Entries.objects.filter(content_text__icontains = request.GET['q_text'])
+
+        # entries_list is only bound when one of the filters above matched;
+        # otherwise send the visitor back to the query page.
+        try:
+            if(not(entries_list)):
+                return HttpResponseRedirect(BASE_URL+"/query")
+        except:
+            return HttpResponseRedirect(BASE_URL+ "/query")
+
+        #TODO: rework this response block
+        return render_to_response('main/main.html' ,{
+            'entries_list':entries_list,
+            #'p_entries_list':p_entries_list,
+            'truncate_words':truncate_words,
+            'items_per_page':repr(items_per_page),
+            'run_time':run_time,
+            #'archive_year':archive_year,
+            #'archive_month':archive_month,
+            #'error':error,
+            'BASE_URL':BASE_URL,
+        })
+    ### If not ###
+    else:
+        # Switch to 'return the result of the arguments provided' mode.
+
+        selected_entries = Entries.objects.select_related()
+
+        # For entry categories
+        entries_list1 = selected_entries.filter(entry_id__label_personal = 1)
+        entries_list2 = selected_entries.filter(entry_id__label_lkd = 1)
+        entries_list3 = selected_entries.filter(entry_id__label_community = 1)
+        entries_list = entries_list1 | entries_list2 | entries_list3
+
+        ## Validating arguments provided by urls.py.
+        # Check if archive_year is not empty and numeric.
+        if((archive_year != '' ) and (str(archive_year).isalnum()) and (not(str(archive_year).isalpha()))):
+            entries_list = entries_list.filter(date__year=archive_year)
+        else:
+            # Fall back to the main view.
+            return HttpResponseRedirect(BASE_URL+"/main")
+
+        # Check if archive_month is not empty and numeric.
+        if(archive_month != '' and (str(archive_month).isalnum()) and not(str(archive_month).isalpha())):
+            entries_list = entries_list.filter(date__month=archive_month)
+
+        # Check if archive_day is not empty and numeric.
+        if(archive_day != '' and (str(archive_day).isalnum()) and not(str(archive_day).isalpha())):
+            entries_list = entries_list.filter(date__day=archive_day)
+        ##
+
+        # Pagination: how many elements are displayed on one paginator page.
+        elements_in_a_page = 25
+        paginator = Paginator(entries_list,elements_in_a_page)
+
+        # Validate the page number; fall back to the first page if it is not an int.
+        try:
+            page = int(request.GET.get('page', '1'))
+        except ValueError:
+            page = 1
+
+        # If the page request is out of range, return the last page.
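+        # (paginator.page(paginator.num_pages) is always the last valid page,
+        # so the fallback below cannot raise again.)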
+ try: + p_entries_list = paginator.page(page) + except (EmptyPage, InvalidPage): + p_entries_list = paginator.page(paginator.num_pages) + + + + + return render_to_response('main/archive.html' ,{ + 'entries_list':entries_list, + 'p_entries_list':p_entries_list, + 'truncate_words':truncate_words, + 'items_per_page':repr(items_per_page), + 'run_time':run_time, + 'archive_year':archive_year, + 'archive_month':archive_month, + #'error':error, + 'BASE_URL':BASE_URL, + }) diff --git a/DJAGEN/branches/mustafa_branch/djagen/collector/wrappers.py b/DJAGEN/branches/mustafa_branch/djagen/collector/wrappers.py new file mode 100755 index 0000000..af35741 --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/collector/wrappers.py @@ -0,0 +1,13 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +from django.shortcuts import render_to_response +from django.template import RequestContext + +def render_response(req, *args, **kwargs): + """ + Wrapper function that automatically adds "context_instance" to render_to_response + """ + + kwargs['context_instance'] = RequestContext(req) + return render_to_response(*args, **kwargs) diff --git a/DJAGEN/branches/mustafa_branch/djagen/gezegen/__init__.py b/DJAGEN/branches/mustafa_branch/djagen/gezegen/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/__init__.py b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/atom.xml.tmpl b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/atom.xml.tmpl new file mode 100755 index 0000000..c444d01 --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/atom.xml.tmpl @@ -0,0 +1,61 @@ + + + + <TMPL_VAR name> + "/> + "/> + + + + + + xml:lang=""> + xml:lang="<TMPL_VAR title_language>"</TMPL_IF>><TMPL_VAR title ESCAPE="HTML"> + "/> + + + xml:lang=""> + + + + + + + + + + + + + + + + + + + + + <TMPL_VAR channel_title ESCAPE="HTML"> + + <TMPL_VAR channel_name ESCAPE="HTML"> + + + + + "/> + + + + + + + + + + + + + + + + diff --git a/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/atom.xml.tmplc b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/atom.xml.tmplc new file mode 100755 index 0000000..4939e63 Binary files /dev/null and b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/atom.xml.tmplc differ diff --git a/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/config.ini b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/config.ini new file mode 100755 index 0000000..c54fd3b --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/config.ini @@ -0,0 +1,42 @@ +[Planet] +name = Linux Gezegeni +link = http://gezegen.linux.org.tr +owner_name = Gezegen Ekibi +owner_email = gezegen@linux.org.tr +cache_directory = cache +new_feed_items = 1 +log_level = DEBUG +template_files = gezegen/index.html.tmpl gezegen/rss20.xml.tmpl gezegen/rss10.xml.tmpl gezegen/opml.xml.tmpl gezegen/foafroll.xml.tmpl gezegen/sidebar.html.tmpl gezegen/simple.html.tmpl gezegen/feeds.html.tmpl gezegen/atom.xml.tmpl +output_dir = www/ +# items_per_page = 15 +items_per_page = 25 +#days_per_page = 0 +feed_timeout = 20 + +# future_dates = ignore_date +# ignore_in_feed = updated + +encoding = utf-8 +locale = tr_TR.UTF-8 + +date_format = %d %b %Y @ %I:%M %p +#date_format = %B %d, %Y %I:%M %p +new_date_format = %d %B %Y + +[DEFAULT] +facewidth = 64 +faceheight = 64 + + 
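+# A hypothetical new subscriber section would follow the same layout as the
+# entries below (face is optional):
+#
+# [http://blog.example.org/feed/]
+# name = Ad Soyad
+# nick = anick
+# label = Personal
+# id = 3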
+[http://www.hakanuygun.com/blog/?feed=atom&cat=13] +name = Hakan Uygun +nick = huygun +label = Personal +id = 1 + +[http://feeds.feedburner.com/oguzy-gezegen] +name = Oğuz Yarımtepe +face = oguzyarimtepe.png +nick = oyarimtepe +label = Personal +id = 2 diff --git a/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/config_entries.ini b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/config_entries.ini new file mode 100755 index 0000000..aa6f9aa --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/config_entries.ini @@ -0,0 +1,44 @@ +[Planet] +name = Linux Gezegeni +link = http://gezegen.linux.org.tr +label = Personal +id = +owner_name = Gezegen Ekibi +owner_email = gezegen@linux.org.tr +cache_directory = cache +new_feed_items = 1 +log_level = DEBUG +template_files = gezegen/index.html.tmpl gezegen/rss20.xml.tmpl gezegen/rss10.xml.tmpl gezegen/opml.xml.tmpl gezegen/foafroll.xml.tmpl gezegen/sidebar.html.tmpl gezegen/simple.html.tmpl gezegen/feeds.html.tmpl gezegen/atom.xml.tmpl +output_dir = www/ +# items_per_page = 15 +items_per_page = 25 +#days_per_page = 0 +feed_timeout = 20 + +# future_dates = ignore_date +# ignore_in_feed = updated + +encoding = utf-8 +locale = tr_TR.UTF-8 + +date_format = %d %b %Y @ %I:%M %p +#date_format = %B %d, %Y %I:%M %p +new_date_format = %d %B %Y + +[DEFAULT] +facewidth = 64 +faceheight = 64 + + +[http://www.hakanuygun.com/blog/?feed=atom&cat=13] +name = Hakan Uygun +nick = huygun +label = Personal +id = 1 + +[http://feeds.feedburner.com/oguzy-gezegen] +name = Oğuz Yarımtepe +face = oguzyarimtepe.png +nick = oyarimtepe +label = Personal +id = 2 diff --git a/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/config_entries.xml b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/config_entries.xml new file mode 100755 index 0000000..f9848a4 --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/config_entries.xml @@ -0,0 +1,17 @@ + + + [http://www.bugunlinux.com/?feed=rss2] + Ahmet Yıldız + ayildiz + + 1 + + + + [http://www.bugunlinux.com/?feed=rss3] + Ahmet Yıldızz + ayildizz + + 2 + + diff --git a/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/config_header.xml b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/config_header.xml new file mode 100755 index 0000000..949e8cf --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/config_header.xml @@ -0,0 +1,28 @@ + +
+ [Planet] + Linux Gezegeni + http://gezegen.linux.org.tr + Gezegen Ekibi + gezegen@linux.org.tr + cache + 1 + DEBUG + gezegen/index.html.tmpl gezegen/rss20.xml.tmpl gezegen/rss10.xml.tmpl gezegen/opml.xml.tmpl gezegen/foafroll.xml.tmpl gezegen/sidebar.html.tmpl gezegen/simple.html.tmpl gezegen/feeds.html.tmpl gezegen/atom.xml.tmpl + www/ + 25 + 20 + + utf-8 + tr_TR.UTF-8 + + %d %b %Y @ %I:%M %p + new_date_format = %d %B %Y +
+ +
+ [DEFAULT] + 64 + 64 +
+
diff --git a/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/feeds.html.tmpl b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/feeds.html.tmpl new file mode 100755 index 0000000..acd9479 --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/feeds.html.tmpl @@ -0,0 +1,22 @@ +
+ +
diff --git a/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/feeds.html.tmplc b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/feeds.html.tmplc new file mode 100755 index 0000000..155f4e4 Binary files /dev/null and b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/feeds.html.tmplc differ diff --git a/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/foafroll.xml.tmpl b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/foafroll.xml.tmpl new file mode 100755 index 0000000..f344738 --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/foafroll.xml.tmpl @@ -0,0 +1,31 @@ + + + + + + " /> + + + + + + + "> + + + " /> + + + + + + + + + diff --git a/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/foafroll.xml.tmplc b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/foafroll.xml.tmplc new file mode 100755 index 0000000..d85d57a Binary files /dev/null and b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/foafroll.xml.tmplc differ diff --git a/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/index.html.tmpl b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/index.html.tmpl new file mode 100755 index 0000000..7726f6b --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/index.html.tmpl @@ -0,0 +1,356 @@ + + + + <TMPL_VAR name> + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + diff --git a/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/index.html.tmplc b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/index.html.tmplc new file mode 100755 index 0000000..259931d Binary files /dev/null and b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/index.html.tmplc differ diff --git a/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/opml.xml.tmpl b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/opml.xml.tmpl new file mode 100755 index 0000000..50bbabe --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/opml.xml.tmpl @@ -0,0 +1,16 @@ + + + + <TMPL_VAR name> + + + + + + + + + " xmlUrl=""/> + + + diff --git a/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/opml.xml.tmplc b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/opml.xml.tmplc new file mode 100755 index 0000000..f9309f9 Binary files /dev/null and b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/opml.xml.tmplc differ diff --git a/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/rss10.xml.tmpl b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/rss10.xml.tmpl new file mode 100755 index 0000000..0cd709b --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/rss10.xml.tmpl @@ -0,0 +1,37 @@ + + +"> + <TMPL_VAR name> + + - + + + + + " /> + + + + + + +"> + <TMPL_VAR channel_name><TMPL_IF title>: <TMPL_VAR title></TMPL_IF> + + + + + + + + + + + + diff --git a/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/rss10.xml.tmplc b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/rss10.xml.tmplc new file mode 100755 index 0000000..18444f3 Binary files /dev/null and b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/rss10.xml.tmplc differ diff --git a/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/rss20.xml.tmpl b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/rss20.xml.tmpl new file mode 100755 index 0000000..3ff7a11 --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/rss20.xml.tmpl @@ -0,0 +1,30 @@ + + + + + <TMPL_VAR name> + + en + - + + + + <TMPL_VAR channel_name><TMPL_IF title>: <TMPL_VAR title></TMPL_IF> + + + + + + " align="right" width="" height="">]]> + + + + + + + + + + + + diff --git a/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/rss20.xml.tmplc b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/rss20.xml.tmplc new file mode 100755 index 0000000..21f007a Binary files /dev/null and b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/rss20.xml.tmplc differ diff --git a/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/sidebar.html.tmpl b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/sidebar.html.tmpl new file mode 100755 index 0000000..acfdf4c --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/sidebar.html.tmpl @@ -0,0 +1,17 @@ + diff --git a/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/sidebar.html.tmplc b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/sidebar.html.tmplc new file mode 100755 index 0000000..50754dd Binary files /dev/null and b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/sidebar.html.tmplc differ diff --git a/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/simple.html.tmpl b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/simple.html.tmpl new file mode 100755 index 0000000..2c20c6a --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/simple.html.tmpl @@ -0,0 +1,74 @@ + + + + + + + <TMPL_VAR name> + + + + + + + + + + + + + + 
+ + + + + + + + + diff --git a/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/simple.html.tmplc b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/simple.html.tmplc new file mode 100755 index 0000000..d466e42 Binary files /dev/null and b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/simple.html.tmplc differ diff --git a/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/zaman.sh b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/zaman.sh new file mode 100755 index 0000000..e0c9a2b --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/gezegen/gezegen/zaman.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +while read x +do + echo "$(date)::$x" +done diff --git a/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet-cache.py b/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet-cache.py new file mode 100755 index 0000000..9334583 --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet-cache.py @@ -0,0 +1,194 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 -*- +"""Planet cache tool. + +""" + +__authors__ = [ "Scott James Remnant ", + "Jeff Waugh " ] +__license__ = "Python" + + +import os +import sys +import time +import dbhash +import ConfigParser + +import planet + + +def usage(): + print "Usage: planet-cache [options] CACHEFILE [ITEMID]..." + print + print "Examine and modify information in the Planet cache." + print + print "Channel Commands:" + print " -C, --channel Display known information on the channel" + print " -L, --list List items in the channel" + print " -K, --keys List all keys found in channel items" + print + print "Item Commands (need ITEMID):" + print " -I, --item Display known information about the item(s)" + print " -H, --hide Mark the item(s) as hidden" + print " -U, --unhide Mark the item(s) as not hidden" + print + print "Other Options:" + print " -h, --help Display this help message and exit" + sys.exit(0) + +def usage_error(msg, *args): + print >>sys.stderr, msg, " ".join(args) + print >>sys.stderr, "Perhaps you need --help ?" + sys.exit(1) + +def print_keys(item, title): + keys = item.keys() + keys.sort() + key_len = max([ len(k) for k in keys ]) + + print title + ":" + for key in keys: + if item.key_type(key) == item.DATE: + value = time.strftime(planet.TIMEFMT_ISO, item[key]) + else: + value = str(item[key]) + print " %-*s %s" % (key_len, key, fit_str(value, 74 - key_len)) + +def fit_str(string, length): + if len(string) <= length: + return string + else: + return string[:length-4] + " ..." 
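+
+# Quick illustration of fit_str (hypothetical inputs): values at or under the
+# limit pass through unchanged, longer ones keep length-4 characters plus " ...":
+#
+#   fit_str("short", 10)      -> "short"
+#   fit_str("0123456789", 8)  -> "0123 ..."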
+ + +if __name__ == "__main__": + cache_file = None + want_ids = 0 + ids = [] + + command = None + + for arg in sys.argv[1:]: + if arg == "-h" or arg == "--help": + usage() + elif arg == "-C" or arg == "--channel": + if command is not None: + usage_error("Only one command option may be supplied") + command = "channel" + elif arg == "-L" or arg == "--list": + if command is not None: + usage_error("Only one command option may be supplied") + command = "list" + elif arg == "-K" or arg == "--keys": + if command is not None: + usage_error("Only one command option may be supplied") + command = "keys" + elif arg == "-I" or arg == "--item": + if command is not None: + usage_error("Only one command option may be supplied") + command = "item" + want_ids = 1 + elif arg == "-H" or arg == "--hide": + if command is not None: + usage_error("Only one command option may be supplied") + command = "hide" + want_ids = 1 + elif arg == "-U" or arg == "--unhide": + if command is not None: + usage_error("Only one command option may be supplied") + command = "unhide" + want_ids = 1 + elif arg.startswith("-"): + usage_error("Unknown option:", arg) + else: + if cache_file is None: + cache_file = arg + elif want_ids: + ids.append(arg) + else: + usage_error("Unexpected extra argument:", arg) + + if cache_file is None: + usage_error("Missing expected cache filename") + elif want_ids and not len(ids): + usage_error("Missing expected entry ids") + + # Open the cache file directly to get the URL it represents + try: + db = dbhash.open(cache_file) + url = db["url"] + db.close() + except dbhash.bsddb._db.DBError, e: + print >>sys.stderr, cache_file + ":", e.args[1] + sys.exit(1) + except KeyError: + print >>sys.stderr, cache_file + ": Probably not a cache file" + sys.exit(1) + + # Now do it the right way :-) + my_planet = planet.Planet(ConfigParser.ConfigParser()) + my_planet.cache_directory = os.path.dirname(cache_file) + channel = planet.Channel(my_planet, url) + + for item_id in ids: + if not channel.has_item(item_id): + print >>sys.stderr, item_id + ": Not in channel" + sys.exit(1) + + # Do the user's bidding + if command == "channel": + print_keys(channel, "Channel Keys") + + elif command == "item": + for item_id in ids: + item = channel.get_item(item_id) + print_keys(item, "Item Keys for %s" % item_id) + + elif command == "list": + print "Items in Channel:" + for item in channel.items(hidden=1, sorted=1): + print " " + item.id + print " " + time.strftime(planet.TIMEFMT_ISO, item.date) + if hasattr(item, "title"): + print " " + fit_str(item.title, 70) + if hasattr(item, "hidden"): + print " (hidden)" + + elif command == "keys": + keys = {} + for item in channel.items(): + for key in item.keys(): + keys[key] = 1 + + keys = keys.keys() + keys.sort() + + print "Keys used in Channel:" + for key in keys: + print " " + key + print + + print "Use --item to output values of particular items." + + elif command == "hide": + for item_id in ids: + item = channel.get_item(item_id) + if hasattr(item, "hidden"): + print item_id + ": Already hidden." + else: + item.hidden = "yes" + + channel.cache_write() + print "Done." + + elif command == "unhide": + for item_id in ids: + item = channel.get_item(item_id) + if hasattr(item, "hidden"): + del(item.hidden) + else: + print item_id + ": Not hidden." + + channel.cache_write() + print "Done." 
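+
+# Example session (the cache file name is hypothetical; planet derives cache
+# file names from the feed URL, dropping the scheme and turning slashes into
+# commas, as in planet/cache.py):
+#
+#   $ python planet-cache.py cache/www.example.org,feed --list
+#   $ python planet-cache.py cache/www.example.org,feed --hide SOME_ITEM_ID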
diff --git a/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet.py b/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet.py new file mode 100755 index 0000000..a245a76 --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet.py @@ -0,0 +1,280 @@ +#!/usr/bin/env python +"""The Planet aggregator. + +A flexible and easy-to-use aggregator for generating websites. + +Visit http://www.planetplanet.org/ for more information and to download +the latest version. + +Requires Python 2.1, recommends 2.3. +""" + +__authors__ = [ "Scott James Remnant ", + "Jeff Waugh " ] +__license__ = "Python" + +import datetime + +import os +import sys +import time +import locale +import urlparse + +import planet + +from ConfigParser import ConfigParser + +# Default configuration file path +CONFIG_FILE = "config.ini" + +# Defaults for the [Planet] config section +PLANET_NAME = "Unconfigured Planet" +PLANET_LINK = "Unconfigured Planet" +PLANET_FEED = None +OWNER_NAME = "Anonymous Coward" +OWNER_EMAIL = "" +LOG_LEVEL = "WARNING" +FEED_TIMEOUT = 20 # seconds + +# Default template file list +TEMPLATE_FILES = "examples/basic/planet.html.tmpl" + +#part for django api usage +import sys +import os +# In order to reduce integration issues, this path gets defined automatically. +sys.path.append(os.path.abspath('../..')) + +os.environ['DJANGO_SETTINGS_MODULE'] = 'djagen.settings' +from djagen.collector.models import * + +def config_get(config, section, option, default=None, raw=0, vars=None): + """Get a value from the configuration, with a default.""" + if config.has_option(section, option): + return config.get(section, option, raw=raw, vars=None) + else: + return default + +def main(): + config_file = CONFIG_FILE + offline = 0 + verbose = 0 + + for arg in sys.argv[1:]: + if arg == "-h" or arg == "--help": + print "Usage: planet [options] [CONFIGFILE]" + print + print "Options:" + print " -v, --verbose DEBUG level logging during update" + print " -o, --offline Update the Planet from the cache only" + print " -h, --help Display this help message and exit" + print + sys.exit(0) + elif arg == "-v" or arg == "--verbose": + verbose = 1 + elif arg == "-o" or arg == "--offline": + offline = 1 + elif arg.startswith("-"): + print >>sys.stderr, "Unknown option:", arg + sys.exit(1) + else: + config_file = arg + + # Read the configuration file + config = ConfigParser() + config.read(config_file) + if not config.has_section("Planet"): + print >>sys.stderr, "Configuration missing [Planet] section." 
+ sys.exit(1) + + # Read the [Planet] config section + planet_name = config_get(config, "Planet", "name", PLANET_NAME) + planet_link = config_get(config, "Planet", "link", PLANET_LINK) + planet_feed = config_get(config, "Planet", "feed", PLANET_FEED) + owner_name = config_get(config, "Planet", "owner_name", OWNER_NAME) + owner_email = config_get(config, "Planet", "owner_email", OWNER_EMAIL) + if verbose: + log_level = "DEBUG" + else: + log_level = config_get(config, "Planet", "log_level", LOG_LEVEL) + feed_timeout = config_get(config, "Planet", "feed_timeout", FEED_TIMEOUT) + template_files = config_get(config, "Planet", "template_files", + TEMPLATE_FILES).split(" ") + + # Default feed to the first feed for which there is a template + if not planet_feed: + for template_file in template_files: + name = os.path.splitext(os.path.basename(template_file))[0] + if name.find('atom')>=0 or name.find('rss')>=0: + planet_feed = urlparse.urljoin(planet_link, name) + break + + # Define locale + if config.has_option("Planet", "locale"): + # The user can specify more than one locale (separated by ":") as + # fallbacks. + locale_ok = False + for user_locale in config.get("Planet", "locale").split(':'): + user_locale = user_locale.strip() + try: + locale.setlocale(locale.LC_ALL, user_locale) + except locale.Error: + pass + else: + locale_ok = True + break + if not locale_ok: + print >>sys.stderr, "Unsupported locale setting." + sys.exit(1) + + # Activate logging + planet.logging.basicConfig() + planet.logging.getLogger().setLevel(planet.logging.getLevelName(log_level)) + log = planet.logging.getLogger("planet.runner") + try: + log.warning + except: + log.warning = log.warn + + # timeoutsocket allows feedparser to time out rather than hang forever on + # ultra-slow servers. Python 2.3 now has this functionality available in + # the standard socket library, so under 2.3 you don't need to install + # anything. But you probably should anyway, because the socket module is + # buggy and timeoutsocket is better. + if feed_timeout: + try: + feed_timeout = float(feed_timeout) + except: + log.warning("Feed timeout set to invalid value '%s', skipping", feed_timeout) + feed_timeout = None + + if feed_timeout and not offline: + try: + from planet import timeoutsocket + timeoutsocket.setDefaultSocketTimeout(feed_timeout) + log.debug("Socket timeout set to %d seconds", feed_timeout) + except ImportError: + import socket + if hasattr(socket, 'setdefaulttimeout'): + log.debug("timeoutsocket not found, using python function") + socket.setdefaulttimeout(feed_timeout) + log.debug("Socket timeout set to %d seconds", feed_timeout) + else: + log.error("Unable to set timeout to %d seconds", feed_timeout) + + # run the planet + my_planet = planet.Planet(config) + my_planet.run(planet_name, planet_link, template_files, offline) + + + + ## This is where archiving is done! 
+    ##
+    # Add the current channels to the DB.
+    channels = my_planet.channels()
+    for channel in channels:
+
+        author_name = channel.name
+
+        # These channel attributes are optional; a missing one raises
+        # AttributeError (see CachedInfo.__getattr__ in planet/cache.py).
+        try:
+            author_face = channel.face
+        except AttributeError:
+            author_face = None
+        try:
+            channel_subtitle = channel.subtitle
+        except AttributeError:
+            channel_subtitle = None
+        try:
+            channel_title = channel.title
+        except AttributeError:
+            channel_title = None
+
+        channel_url = channel.url
+
+        try:
+            channel_link = channel.link
+        except AttributeError:
+            channel_link = None
+
+        try:
+            channel_urlstatus = channel.url_status
+        except AttributeError:
+            channel_urlstatus = None
+
+        label = channel.label
+
+        label_personal = 0
+        label_lkd = 0
+        label_community = 0
+        label_eng = 0
+        if label == "Personal":
+            label_personal = 1
+        elif label == "LKD":
+            label_lkd = 1
+        elif label == "Community":
+            label_community = 1
+        elif label == "Eng":
+            label_eng = 1
+
+        id = channel.id
+
+        try:
+            author = Authors.objects.get(author_id=id)
+
+            # Update the values with the ones from the config file.
+            author.author_name = author_name
+            author.author_face = author_face
+            author.channel_subtitle = channel_subtitle
+            author.channel_title = channel_title
+            author.channel_url = channel_url
+            author.channel_link = channel_link
+            author.channel_url_status = channel_urlstatus
+            author.label_personal = label_personal
+            author.label_lkd = label_lkd
+            author.label_community = label_community
+            author.label_eng = label_eng
+
+        except Authors.DoesNotExist:
+            author = Authors(author_id=id, author_name=author_name, author_face=author_face, channel_subtitle=channel_subtitle, channel_title=channel_title, channel_url=channel_url, channel_link=channel_link, channel_urlstatus=channel_urlstatus, label_personal=label_personal, label_lkd=label_lkd, label_community=label_community, label_eng=label_eng)
+
+        author.save()
+
+        # Entries of this channel.
+        items = channel.items()
+        for item in items:
+            id_hash = item.id_hash
+
+            try:
+                # Update an already archived entry in place.
+                entry = author.entries_set.get(id_hash=id_hash)
+                entry.title = item.title
+                entry.content_html = item.content
+                entry.content_text = entry.sanitize(item.content)
+                entry.summary = item.summary if item.has_key('summary') else None
+                entry.link = item.link
+                d = item.date
+                entry.date = datetime.datetime(d[0], d[1], d[2], d[3], d[4], d[5])
+            except Entries.DoesNotExist:
+                # First time this entry is seen: create it.
+                content_html = item.content
+                d = item.date
+                if not item.has_key('summary'):
+                    summary = None
+                else:
+                    summary = item.summary
+                entry = author.entries_set.create(id_hash=id_hash, title=item.title, content_html=item.content, summary=summary, link=item.link, date=datetime.datetime(d[0], d[1], d[2], d[3], d[4], d[5]))
+                entry.content_text = entry.sanitize(content_html)
+
+            entry.save()
+
+    # Record the time of this run.
+    r = RunTime()
+    r.save()
+
+    my_planet.generate_all_files(template_files, planet_name,
+                                 planet_link, planet_feed, owner_name, owner_email)
+
+
+if __name__ == "__main__":
+    main()
+
diff --git a/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet/__init__.py b/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet/__init__.py
new file mode 100755
index 0000000..7829731
--- /dev/null
+++ b/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet/__init__.py
@@ -0,0 +1,969 @@
+#!/usr/bin/env python
+# -*- coding: UTF-8 -*-
+"""Planet aggregator library.
+
+This package is a library for developing web sites or software that
+aggregate RSS, CDF and Atom feeds taken from elsewhere into a single,
+combined feed.
+""" + +__version__ = "2.0" +__authors__ = [ "Scott James Remnant ", + "Jeff Waugh " ] +__license__ = "Python" + +import locale + +# Modules available without separate import +import cache +import feedparser +import sanitize +import htmltmpl +import sgmllib +try: + import logging +except: + import compat_logging as logging + +# Limit the effect of "from planet import *" +__all__ = ("cache", "feedparser", "htmltmpl", "logging", + "Planet", "Channel", "NewsItem") + + +import os +import md5 +import time +import dbhash +import re + +try: + from xml.sax.saxutils import escape +except: + def escape(data): + return data.replace("&","&").replace(">",">").replace("<","<") + +# Version information (for generator headers) +VERSION = ("Planet/%s +http://www.planetplanet.org" % __version__) + +# Default User-Agent header to send when retreiving feeds +USER_AGENT = VERSION + " " + feedparser.USER_AGENT + +# Default cache directory +CACHE_DIRECTORY = "cache" + +# Default number of items to display from a new feed +NEW_FEED_ITEMS = 10 + +# Useful common date/time formats +TIMEFMT_ISO = "%Y-%m-%dT%H:%M:%S+00:00" +TIMEFMT_822 = "%a, %d %b %Y %H:%M:%S +0000" + + +# Log instance to use here +log = logging.getLogger("planet") +try: + log.warning +except: + log.warning = log.warn + +# Defaults for the template file config sections +ENCODING = "utf-8" +ITEMS_PER_PAGE = 60 +DAYS_PER_PAGE = 0 +OUTPUT_DIR = "output" +DATE_FORMAT = "%B %d, %Y %I:%M %p" +NEW_DATE_FORMAT = "%B %d, %Y" +ACTIVITY_THRESHOLD = 0 + +class stripHtml(sgmllib.SGMLParser): + "remove all tags from the data" + def __init__(self, data): + sgmllib.SGMLParser.__init__(self) + self.result='' + self.feed(data) + self.close() + def handle_data(self, data): + if data: self.result+=data + +def template_info(item, date_format): + """Produce a dictionary of template information.""" + info = {} + + #set the locale so that the dates at the feeds will be in english + lc=locale.getlocale() + if lc[0] == None: + try: + locale.setlocale(locale.LC_ALL, '') + except: + pass + elif lc[0].find("tr") != -1: + try: + locale.setlocale(locale.LC_ALL, '') + except: + pass + + for key in item.keys(): + if item.key_type(key) == item.DATE: + date = item.get_as_date(key) + info[key] = time.strftime(date_format, date) + info[key + "_iso"] = time.strftime(TIMEFMT_ISO, date) + info[key + "_822"] = time.strftime(TIMEFMT_822, date) + else: + info[key] = item[key] + if 'title' in item.keys(): + info['title_plain'] = stripHtml(info['title']).result + + return info + + +class Planet: + """A set of channels. + + This class represents a set of channels for which the items will + be aggregated together into one combined feed. + + Properties: + user_agent User-Agent header to fetch feeds with. + cache_directory Directory to store cached channels in. + new_feed_items Number of items to display from a new feed. + filter A regular expression that articles must match. + exclude A regular expression that articles must not match. 
+ """ + def __init__(self, config): + self.config = config + + self._channels = [] + + self.user_agent = USER_AGENT + self.cache_directory = CACHE_DIRECTORY + self.new_feed_items = NEW_FEED_ITEMS + self.filter = None + self.exclude = None + + def tmpl_config_get(self, template, option, default=None, raw=0, vars=None): + """Get a template value from the configuration, with a default.""" + if self.config.has_option(template, option): + return self.config.get(template, option, raw=raw, vars=None) + elif self.config.has_option("Planet", option): + return self.config.get("Planet", option, raw=raw, vars=None) + else: + return default + + def gather_channel_info(self, template_file="Planet"): + date_format = self.tmpl_config_get(template_file, + "date_format", DATE_FORMAT, raw=1) + + activity_threshold = int(self.tmpl_config_get(template_file, + "activity_threshold", + ACTIVITY_THRESHOLD)) + + if activity_threshold: + activity_horizon = \ + time.gmtime(time.time()-86400*activity_threshold) + else: + activity_horizon = 0 + + channels = {} + channels_list = [] + for channel in self.channels(hidden=1): + channels[channel] = template_info(channel, date_format) + channels_list.append(channels[channel]) + + # identify inactive feeds + if activity_horizon: + latest = channel.items(sorted=1) + if len(latest)==0 or latest[0].date < activity_horizon: + channels[channel]["message"] = \ + "no activity in %d days" % activity_threshold + + # report channel level errors + if not channel.url_status: continue + status = int(channel.url_status) + if status == 403: + channels[channel]["message"] = "403: forbidden" + elif status == 404: + channels[channel]["message"] = "404: not found" + elif status == 408: + channels[channel]["message"] = "408: request timeout" + elif status == 410: + channels[channel]["message"] = "410: gone" + elif status == 500: + channels[channel]["message"] = "internal server error" + elif status >= 400: + channels[channel]["message"] = "http status %s" % status + + return channels, channels_list + + def gather_items_info(self, channels, template_file="Planet", channel_list=None): + items_list = [] + prev_date = [] + prev_channel = None + + date_format = self.tmpl_config_get(template_file, + "date_format", DATE_FORMAT, raw=1) + items_per_page = int(self.tmpl_config_get(template_file, + "items_per_page", ITEMS_PER_PAGE)) + days_per_page = int(self.tmpl_config_get(template_file, + "days_per_page", DAYS_PER_PAGE)) + new_date_format = self.tmpl_config_get(template_file, + "new_date_format", NEW_DATE_FORMAT, raw=1) + + for newsitem in self.items(max_items=items_per_page, + max_days=days_per_page, + channels=channel_list): + item_info = template_info(newsitem, date_format) + chan_info = channels[newsitem._channel] + for k, v in chan_info.items(): + item_info["channel_" + k] = v + + # Check for the start of a new day + if prev_date[:3] != newsitem.date[:3]: + prev_date = newsitem.date + item_info["new_date"] = time.strftime(new_date_format, + newsitem.date) + + # Check for the start of a new channel + if item_info.has_key("new_date") \ + or prev_channel != newsitem._channel: + prev_channel = newsitem._channel + item_info["new_channel"] = newsitem._channel.url + + items_list.append(item_info) + + return items_list + + def run(self, planet_name, planet_link, template_files, offline = False): + log = logging.getLogger("planet.runner") + + # Create a planet + log.info("Loading cached data") + if self.config.has_option("Planet", "cache_directory"): + self.cache_directory = self.config.get("Planet", 
"cache_directory") + if self.config.has_option("Planet", "new_feed_items"): + self.new_feed_items = int(self.config.get("Planet", "new_feed_items")) + self.user_agent = "%s +%s %s" % (planet_name, planet_link, + self.user_agent) + if self.config.has_option("Planet", "filter"): + self.filter = self.config.get("Planet", "filter") + + # The other configuration blocks are channels to subscribe to + for feed_url in self.config.sections(): + if feed_url == "Planet" or feed_url in template_files: + continue + log.info(feed_url) + # Create a channel, configure it and subscribe it + channel = Channel(self, feed_url) + self.subscribe(channel) + + # Update it + try: + if not offline and not channel.url_status == '410': + channel.update() + except KeyboardInterrupt: + raise + except: + log.exception("Update of <%s> failed", feed_url) + + def generate_all_files(self, template_files, planet_name, + planet_link, planet_feed, owner_name, owner_email): + + log = logging.getLogger("planet.runner") + # Go-go-gadget-template + for template_file in template_files: + manager = htmltmpl.TemplateManager() + log.info("Processing template %s", template_file) + try: + template = manager.prepare(template_file) + except htmltmpl.TemplateError: + template = manager.prepare(os.path.basename(template_file)) + # Read the configuration + output_dir = self.tmpl_config_get(template_file, + "output_dir", OUTPUT_DIR) + date_format = self.tmpl_config_get(template_file, + "date_format", DATE_FORMAT, raw=1) + encoding = self.tmpl_config_get(template_file, "encoding", ENCODING) + + # We treat each template individually + base = os.path.splitext(os.path.basename(template_file))[0] + url = os.path.join(planet_link, base) + output_file = os.path.join(output_dir, base) + + # Gather information + channels, channels_list = self.gather_channel_info(template_file) + items_list = self.gather_items_info(channels, template_file) + + # Gather item information + + # Process the template + tp = htmltmpl.TemplateProcessor(html_escape=0) + tp.set("Items", items_list) + tp.set("Channels", channels_list) + + # Generic information + tp.set("generator", VERSION) + tp.set("name", planet_name) + tp.set("link", planet_link) + tp.set("owner_name", owner_name) + tp.set("owner_email", owner_email) + tp.set("url", url) + + if planet_feed: + tp.set("feed", planet_feed) + tp.set("feedtype", planet_feed.find('rss')>=0 and 'rss' or 'atom') + + # Update time + date = time.localtime() + tp.set("date", time.strftime(date_format, date)) + tp.set("date_iso", time.strftime(TIMEFMT_ISO, date)) + tp.set("date_822", time.strftime(TIMEFMT_822, date)) + + try: + log.info("Writing %s", output_file) + output_fd = open(output_file, "w") + if encoding.lower() in ("utf-8", "utf8"): + # UTF-8 output is the default because we use that internally + output_fd.write(tp.process(template)) + elif encoding.lower() in ("xml", "html", "sgml"): + # Magic for Python 2.3 users + output = tp.process(template).decode("utf-8") + output_fd.write(output.encode("ascii", "xmlcharrefreplace")) + else: + # Must be a "known" encoding + output = tp.process(template).decode("utf-8") + output_fd.write(output.encode(encoding, "replace")) + output_fd.close() + except KeyboardInterrupt: + raise + except: + log.exception("Write of %s failed", output_file) + + def channels(self, hidden=0, sorted=1): + """Return the list of channels.""" + channels = [] + for channel in self._channels: + if hidden or not channel.has_key("hidden"): + channels.append((channel.name, channel)) + + if sorted: + channels.sort() + + 
+        return [ c[-1] for c in channels ]
+
+    def find_by_basename(self, basename):
+        for channel in self._channels:
+            if basename == channel.cache_basename(): return channel
+
+    def subscribe(self, channel):
+        """Subscribe the planet to the channel."""
+        self._channels.append(channel)
+
+    def unsubscribe(self, channel):
+        """Unsubscribe the planet from the channel."""
+        self._channels.remove(channel)
+
+    def items(self, hidden=0, sorted=1, max_items=0, max_days=0, channels=None):
+        """Return an optionally filtered list of items in the channel.
+
+        The filters are applied in the following order:
+
+        If hidden is true then items in hidden channels and hidden items
+        will be returned.
+
+        If sorted is true then the item list will be sorted with the newest
+        first.
+
+        If max_items is non-zero then this number of items, at most, will
+        be returned.
+
+        If max_days is non-zero then any items older than the newest by
+        this number of days won't be returned.  Requires sorted=1 to work.
+
+
+        The sharp-eyed will note that this looks a little strange code-wise,
+        it turns out that Python gets *really* slow if we try to sort the
+        actual items themselves.  Also we use mktime here, but it's ok
+        because we discard the numbers and just need them to be relatively
+        consistent between each other.
+        """
+        planet_filter_re = None
+        if self.filter:
+            planet_filter_re = re.compile(self.filter, re.I)
+        planet_exclude_re = None
+        if self.exclude:
+            planet_exclude_re = re.compile(self.exclude, re.I)
+
+        items = []
+        seen_guids = {}
+        if not channels: channels = self.channels(hidden=hidden, sorted=0)
+        for channel in channels:
+            for item in channel._items.values():
+                if hidden or not item.has_key("hidden"):
+
+                    channel_filter_re = None
+                    if channel.filter:
+                        channel_filter_re = re.compile(channel.filter,
+                                                       re.I)
+                    channel_exclude_re = None
+                    if channel.exclude:
+                        channel_exclude_re = re.compile(channel.exclude,
+                                                        re.I)
+                    if (planet_filter_re or planet_exclude_re \
+                        or channel_filter_re or channel_exclude_re):
+                        title = ""
+                        if item.has_key("title"):
+                            title = item.title
+                        content = item.get_content("content")
+
+                        if planet_filter_re:
+                            if not (planet_filter_re.search(title) \
+                                    or planet_filter_re.search(content)):
+                                continue
+
+                        if planet_exclude_re:
+                            if (planet_exclude_re.search(title) \
+                                or planet_exclude_re.search(content)):
+                                continue
+
+                        if channel_filter_re:
+                            if not (channel_filter_re.search(title) \
+                                    or channel_filter_re.search(content)):
+                                continue
+
+                        if channel_exclude_re:
+                            if (channel_exclude_re.search(title) \
+                                or channel_exclude_re.search(content)):
+                                continue
+
+                    if not seen_guids.has_key(item.id):
+                        seen_guids[item.id] = 1
+                        items.append((time.mktime(item.date), item.order, item))
+
+        # Sort the list
+        if sorted:
+            items.sort()
+            items.reverse()
+
+        # Apply max_items filter
+        if len(items) and max_items:
+            items = items[:max_items]
+
+        # Apply max_days filter
+        if len(items) and max_days:
+            max_count = 0
+            max_time = items[0][0] - max_days * 86400  # 86400 seconds in a day
+            for item in items:
+                if item[0] > max_time:
+                    max_count += 1
+                else:
+                    items = items[:max_count]
+                    break
+
+        return [ i[-1] for i in items ]
+
+class Channel(cache.CachedInfo):
+    """A list of news items.
+
+    This class represents a list of news items taken from the feed of
+    a website or other source.
+
+    Properties:
+        url             URL of the feed.
+        url_etag        E-Tag of the feed URL.
+        url_modified    Last modified time of the feed URL.
+        url_status      Last HTTP status of the feed URL.
+        hidden          Channel should be hidden (True if exists).
+ name Name of the feed owner, or feed title. + next_order Next order number to be assigned to NewsItem + + updated Correct UTC-Normalised update time of the feed. + last_updated Correct UTC-Normalised time the feed was last updated. + + id An identifier the feed claims is unique (*). + title One-line title (*). + link Link to the original format feed (*). + tagline Short description of the feed (*). + info Longer description of the feed (*). + + modified Date the feed claims to have been modified (*). + + author Name of the author (*). + publisher Name of the publisher (*). + generator Name of the feed generator (*). + category Category name (*). + copyright Copyright information for humans to read (*). + license Link to the licence for the content (*). + docs Link to the specification of the feed format (*). + language Primary language (*). + errorreportsto E-Mail address to send error reports to (*). + + image_url URL of an associated image (*). + image_link Link to go with the associated image (*). + image_title Alternative text of the associated image (*). + image_width Width of the associated image (*). + image_height Height of the associated image (*). + + filter A regular expression that articles must match. + exclude A regular expression that articles must not match. + + Properties marked (*) will only be present if the original feed + contained them. Note that the optional 'modified' date field is simply + a claim made by the item and parsed from the information given, 'updated' + (and 'last_updated') are far more reliable sources of information. + + Some feeds may define additional properties to those above. + """ + IGNORE_KEYS = ("links", "contributors", "textinput", "cloud", "categories", + "url", "href", "url_etag", "url_modified", "tags", "itunes_explicit") + + def __init__(self, planet, url): + if not os.path.isdir(planet.cache_directory): + os.makedirs(planet.cache_directory) + cache_filename = cache.filename(planet.cache_directory, url) + cache_file = dbhash.open(cache_filename, "c", 0666) + + cache.CachedInfo.__init__(self, cache_file, url, root=1) + + self._items = {} + self._planet = planet + self._expired = [] + self.url = url + # retain the original URL for error reporting + self.configured_url = url + self.url_etag = None + self.url_status = None + self.url_modified = None + self.name = None + self.updated = None + self.last_updated = None + self.filter = None + self.exclude = None + self.next_order = "0" + self.cache_read() + self.cache_read_entries() + + if planet.config.has_section(url): + for option in planet.config.options(url): + value = planet.config.get(url, option) + self.set_as_string(option, value, cached=0) + + def has_item(self, id_): + """Check whether the item exists in the channel.""" + return self._items.has_key(id_) + + def get_item(self, id_): + """Return the item from the channel.""" + return self._items[id_] + + # Special methods + __contains__ = has_item + + def items(self, hidden=0, sorted=0): + """Return the item list.""" + items = [] + for item in self._items.values(): + if hidden or not item.has_key("hidden"): + items.append((time.mktime(item.date), item.order, item)) + + if sorted: + items.sort() + items.reverse() + + return [ i[-1] for i in items ] + + def __iter__(self): + """Iterate the sorted item list.""" + return iter(self.items(sorted=1)) + + def cache_read_entries(self): + """Read entry information from the cache.""" + keys = self._cache.keys() + for key in keys: + if key.find(" ") != -1: continue + if self.has_key(key): continue + 
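+            # Keys containing a space are per-item sub-keys, and keys the
+            # channel itself owns were skipped above; anything left is an
+            # entry id, which is rehydrated from the cache as a NewsItem.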
+ item = NewsItem(self, key) + self._items[key] = item + + def cache_basename(self): + return cache.filename('',self._id) + + def cache_write(self, sync=1): + + """Write channel and item information to the cache.""" + for item in self._items.values(): + item.cache_write(sync=0) + for item in self._expired: + item.cache_clear(sync=0) + cache.CachedInfo.cache_write(self, sync) + + self._expired = [] + + def feed_information(self): + """ + Returns a description string for the feed embedded in this channel. + + This will usually simply be the feed url embedded in <>, but in the + case where the current self.url has changed from the original + self.configured_url the string will contain both pieces of information. + This is so that the URL in question is easier to find in logging + output: getting an error about a URL that doesn't appear in your config + file is annoying. + """ + if self.url == self.configured_url: + return "<%s>" % self.url + else: + return "<%s> (formerly <%s>)" % (self.url, self.configured_url) + + def update(self): + """Download the feed to refresh the information. + + This does the actual work of pulling down the feed and if it changes + updates the cached information about the feed and entries within it. + """ + info = feedparser.parse(self.url, + etag=self.url_etag, modified=self.url_modified, + agent=self._planet.user_agent) + if info.has_key("status"): + self.url_status = str(info.status) + elif info.has_key("entries") and len(info.entries)>0: + self.url_status = str(200) + elif info.bozo and info.bozo_exception.__class__.__name__=='Timeout': + self.url_status = str(408) + else: + self.url_status = str(500) + + if self.url_status == '301' and \ + (info.has_key("entries") and len(info.entries)>0): + log.warning("Feed has moved from <%s> to <%s>", self.url, info.url) + try: + os.link(cache.filename(self._planet.cache_directory, self.url), + cache.filename(self._planet.cache_directory, info.url)) + except: + pass + self.url = info.url + elif self.url_status == '304': + log.info("Feed %s unchanged", self.feed_information()) + return + elif self.url_status == '410': + log.info("Feed %s gone", self.feed_information()) + self.cache_write() + return + elif self.url_status == '408': + log.warning("Feed %s timed out", self.feed_information()) + return + elif int(self.url_status) >= 400: + log.error("Error %s while updating feed %s", + self.url_status, self.feed_information()) + return + else: + log.info("Updating feed %s", self.feed_information()) + + self.url_etag = info.has_key("etag") and info.etag or None + self.url_modified = info.has_key("modified") and info.modified or None + if self.url_etag is not None: + log.debug("E-Tag: %s", self.url_etag) + if self.url_modified is not None: + log.debug("Last Modified: %s", + time.strftime(TIMEFMT_ISO, self.url_modified)) + + self.update_info(info.feed) + self.update_entries(info.entries) + self.cache_write() + + def update_info(self, feed): + """Update information from the feed. + + This reads the feed information supplied by feedparser and updates + the cached information about the feed. These are the various + potentially interesting properties that you might care about. 
+ """ + for key in feed.keys(): + if key in self.IGNORE_KEYS or key + "_parsed" in self.IGNORE_KEYS: + # Ignored fields + pass + elif feed.has_key(key + "_parsed"): + # Ignore unparsed date fields + pass + elif key.endswith("_detail"): + # retain name and email sub-fields + if feed[key].has_key('name') and feed[key].name: + self.set_as_string(key.replace("_detail","_name"), \ + feed[key].name) + if feed[key].has_key('email') and feed[key].email: + self.set_as_string(key.replace("_detail","_email"), \ + feed[key].email) + elif key == "items": + # Ignore items field + pass + elif key.endswith("_parsed"): + # Date fields + if feed[key] is not None: + self.set_as_date(key[:-len("_parsed")], feed[key]) + elif key == "image": + # Image field: save all the information + if feed[key].has_key("url"): + self.set_as_string(key + "_url", feed[key].url) + if feed[key].has_key("link"): + self.set_as_string(key + "_link", feed[key].link) + if feed[key].has_key("title"): + self.set_as_string(key + "_title", feed[key].title) + if feed[key].has_key("width"): + self.set_as_string(key + "_width", str(feed[key].width)) + if feed[key].has_key("height"): + self.set_as_string(key + "_height", str(feed[key].height)) + elif isinstance(feed[key], (str, unicode)): + # String fields + try: + detail = key + '_detail' + if feed.has_key(detail) and feed[detail].has_key('type'): + if feed[detail].type == 'text/html': + feed[key] = sanitize.HTML(feed[key]) + elif feed[detail].type == 'text/plain': + feed[key] = escape(feed[key]) + self.set_as_string(key, feed[key]) + except KeyboardInterrupt: + raise + except: + log.exception("Ignored '%s' of <%s>, unknown format", + key, self.url) + + def update_entries(self, entries): + """Update entries from the feed. + + This reads the entries supplied by feedparser and updates the + cached information about them. It's at this point we update + the 'updated' timestamp and keep the old one in 'last_updated', + these provide boundaries for acceptable entry times. + + If this is the first time a feed has been updated then most of the + items will be marked as hidden, according to Planet.new_feed_items. + + If the feed does not contain items which, according to the sort order, + should be there; those items are assumed to have been expired from + the feed or replaced and are removed from the cache. 
+ """ + if not len(entries): + return + + self.last_updated = self.updated + self.updated = time.gmtime() + + new_items = [] + feed_items = [] + for entry in entries: + # Try really hard to find some kind of unique identifier + if entry.has_key("id"): + entry_id = cache.utf8(entry.id) + elif entry.has_key("link"): + entry_id = cache.utf8(entry.link) + elif entry.has_key("title"): + entry_id = (self.url + "/" + + md5.new(cache.utf8(entry.title)).hexdigest()) + elif entry.has_key("summary"): + entry_id = (self.url + "/" + + md5.new(cache.utf8(entry.summary)).hexdigest()) + else: + log.error("Unable to find or generate id, entry ignored") + continue + + # Create the item if necessary and update + if self.has_item(entry_id): + item = self._items[entry_id] + else: + item = NewsItem(self, entry_id) + self._items[entry_id] = item + new_items.append(item) + item.update(entry) + feed_items.append(entry_id) + + # Hide excess items the first time through + if self.last_updated is None and self._planet.new_feed_items \ + and len(feed_items) > self._planet.new_feed_items: + item.hidden = "yes" + log.debug("Marked <%s> as hidden (new feed)", entry_id) + + # Assign order numbers in reverse + new_items.reverse() + for item in new_items: + item.order = self.next_order = str(int(self.next_order) + 1) + + # Check for expired or replaced items + feed_count = len(feed_items) + log.debug("Items in Feed: %d", feed_count) + for item in self.items(sorted=1): + if feed_count < 1: + break + elif item.id in feed_items: + feed_count -= 1 + elif item._channel.url_status != '226': + del(self._items[item.id]) + self._expired.append(item) + log.debug("Removed expired or replaced item <%s>", item.id) + + def get_name(self, key): + """Return the key containing the name.""" + for key in ("name", "title"): + if self.has_key(key) and self.key_type(key) != self.NULL: + return self.get_as_string(key) + + return "" + +class NewsItem(cache.CachedInfo): + """An item of news. + + This class represents a single item of news on a channel. They're + created by members of the Channel class and accessible through it. + + Properties: + id Channel-unique identifier for this item. + id_hash Relatively short, printable cryptographic hash of id + date Corrected UTC-Normalised update time, for sorting. + order Order in which items on the same date can be sorted. + hidden Item should be hidden (True if exists). + + title One-line title (*). + link Link to the original format text (*). + summary Short first-page summary (*). + content Full HTML content. + + modified Date the item claims to have been modified (*). + issued Date the item claims to have been issued (*). + created Date the item claims to have been created (*). + expired Date the item claims to expire (*). + + author Name of the author (*). + publisher Name of the publisher (*). + category Category name (*). + comments Link to a page to enter comments (*). + license Link to the licence for the content (*). + source_name Name of the original source of this item (*). + source_link Link to the original source of this item (*). + + Properties marked (*) will only be present if the original feed + contained them. Note that the various optional date fields are + simply claims made by the item and parsed from the information + given, 'date' is a far more reliable source of information. + + Some feeds may define additional properties to those above. 
+ """ + IGNORE_KEYS = ("categories", "contributors", "enclosures", "links", + "guidislink", "date", "tags") + + def __init__(self, channel, id_): + cache.CachedInfo.__init__(self, channel._cache, id_) + + self._channel = channel + self.id = id_ + self.id_hash = md5.new(id_).hexdigest() + self.date = None + self.order = None + self.content = None + self.cache_read() + + def update(self, entry): + """Update the item from the feedparser entry given.""" + for key in entry.keys(): + if key in self.IGNORE_KEYS or key + "_parsed" in self.IGNORE_KEYS: + # Ignored fields + pass + elif entry.has_key(key + "_parsed"): + # Ignore unparsed date fields + pass + elif key.endswith("_detail"): + # retain name, email, and language sub-fields + if entry[key].has_key('name') and entry[key].name: + self.set_as_string(key.replace("_detail","_name"), \ + entry[key].name) + if entry[key].has_key('email') and entry[key].email: + self.set_as_string(key.replace("_detail","_email"), \ + entry[key].email) + if entry[key].has_key('language') and entry[key].language and \ + (not self._channel.has_key('language') or \ + entry[key].language != self._channel.language): + self.set_as_string(key.replace("_detail","_language"), \ + entry[key].language) + elif key.endswith("_parsed"): + # Date fields + if entry[key] is not None: + self.set_as_date(key[:-len("_parsed")], entry[key]) + elif key == "source": + # Source field: save both url and value + if entry[key].has_key("value"): + self.set_as_string(key + "_name", entry[key].value) + if entry[key].has_key("url"): + self.set_as_string(key + "_link", entry[key].url) + elif key == "content": + # Content field: concatenate the values + value = "" + for item in entry[key]: + if item.type == 'text/html': + item.value = sanitize.HTML(item.value) + elif item.type == 'text/plain': + item.value = escape(item.value) + if item.has_key('language') and item.language and \ + (not self._channel.has_key('language') or + item.language != self._channel.language) : + self.set_as_string(key + "_language", item.language) + value += cache.utf8(item.value) + self.set_as_string(key, value) + elif isinstance(entry[key], (str, unicode)): + # String fields + try: + detail = key + '_detail' + if entry.has_key(detail): + if entry[detail].has_key('type'): + if entry[detail].type == 'text/html': + entry[key] = sanitize.HTML(entry[key]) + elif entry[detail].type == 'text/plain': + entry[key] = escape(entry[key]) + self.set_as_string(key, entry[key]) + except KeyboardInterrupt: + raise + except: + log.exception("Ignored '%s' of <%s>, unknown format", + key, self.id) + + # Generate the date field if we need to + self.get_date("date") + + def get_date(self, key): + """Get (or update) the date key. + + We check whether the date the entry claims to have been changed is + since we last updated this feed and when we pulled the feed off the + site. + + If it is then it's probably not bogus, and we'll sort accordingly. + + If it isn't then we bound it appropriately, this ensures that + entries appear in posting sequence but don't overlap entries + added in previous updates and don't creep into the next one. 
+ """ + + for other_key in ("updated", "modified", "published", "issued", "created"): + if self.has_key(other_key): + date = self.get_as_date(other_key) + break + else: + date = None + + if date is not None: + if date > self._channel.updated: + date = self._channel.updated +# elif date < self._channel.last_updated: +# date = self._channel.updated + elif self.has_key(key) and self.key_type(key) != self.NULL: + return self.get_as_date(key) + else: + date = self._channel.updated + + self.set_as_date(key, date) + return date + + def get_content(self, key): + """Return the key containing the content.""" + for key in ("content", "tagline", "summary"): + if self.has_key(key) and self.key_type(key) != self.NULL: + return self.get_as_string(key) + + return "" diff --git a/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet/atomstyler.py b/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet/atomstyler.py new file mode 100755 index 0000000..9220702 --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet/atomstyler.py @@ -0,0 +1,124 @@ +from xml.dom import minidom, Node +from urlparse import urlparse, urlunparse +from xml.parsers.expat import ExpatError +from htmlentitydefs import name2codepoint +import re + +# select and apply an xml:base for this entry +class relativize: + def __init__(self, parent): + self.score = {} + self.links = [] + self.collect_and_tally(parent) + self.base = self.select_optimal_base() + if self.base: + if not parent.hasAttribute('xml:base'): + self.rebase(parent) + parent.setAttribute('xml:base', self.base) + + # collect and tally cite, href and src attributes + def collect_and_tally(self,parent): + uri = None + if parent.hasAttribute('cite'): uri=parent.getAttribute('cite') + if parent.hasAttribute('href'): uri=parent.getAttribute('href') + if parent.hasAttribute('src'): uri=parent.getAttribute('src') + + if uri: + parts=urlparse(uri) + if parts[0].lower() == 'http': + parts = (parts[1]+parts[2]).split('/') + base = None + for i in range(1,len(parts)): + base = tuple(parts[0:i]) + self.score[base] = self.score.get(base,0) + len(base) + if base and base not in self.links: self.links.append(base) + + for node in parent.childNodes: + if node.nodeType == Node.ELEMENT_NODE: + self.collect_and_tally(node) + + # select the xml:base with the highest score + def select_optimal_base(self): + if not self.score: return None + for link in self.links: + self.score[link] = 0 + winner = max(self.score.values()) + if not winner: return None + for key in self.score.keys(): + if self.score[key] == winner: + if winner == len(key): return None + return urlunparse(('http', key[0], '/'.join(key[1:]), '', '', '')) + '/' + + # rewrite cite, href and src attributes using this base + def rebase(self,parent): + uri = None + if parent.hasAttribute('cite'): uri=parent.getAttribute('cite') + if parent.hasAttribute('href'): uri=parent.getAttribute('href') + if parent.hasAttribute('src'): uri=parent.getAttribute('src') + if uri and uri.startswith(self.base): + uri = uri[len(self.base):] or '.' 
+            if parent.hasAttribute('href'): parent.setAttribute('href', uri)
+            if parent.hasAttribute('src'): parent.setAttribute('src', uri)
+
+        for node in parent.childNodes:
+            if node.nodeType == Node.ELEMENT_NODE:
+                self.rebase(node)
+
+# convert type="html" to type="plain" or type="xhtml" as appropriate
+def retype(parent):
+    for node in parent.childNodes:
+        if node.nodeType == Node.ELEMENT_NODE:
+
+            if node.hasAttribute('type') and node.getAttribute('type') == 'html':
+                if len(node.childNodes) == 0:
+                    node.removeAttribute('type')
+                elif len(node.childNodes) == 1:
+
+                    # replace html entity defs with utf-8
+                    chunks = re.split('&(\w+);', node.childNodes[0].nodeValue)
+                    for i in range(1, len(chunks), 2):
+                        if chunks[i] in ['amp', 'lt', 'gt', 'apos', 'quot']:
+                            chunks[i] = '&' + chunks[i] + ';'
+                        elif chunks[i] in name2codepoint:
+                            chunks[i] = unichr(name2codepoint[chunks[i]])
+                        else:
+                            chunks[i] = '&amp;' + chunks[i] + ';'
+                    text = u"".join(chunks)
+
+                    try:
+                        # see if the resulting text is a well-formed XML fragment
+                        div = '<div xmlns="http://www.w3.org/1999/xhtml">%s</div>'
+                        data = minidom.parseString(div % text.encode('utf-8'))
+
+                        if text.find('<') < 0:
+                            # plain text
+                            node.removeAttribute('type')
+                            text = data.documentElement.childNodes[0].nodeValue
+                            node.childNodes[0].replaceWholeText(text)
+
+                        elif len(text) > 80:
+                            # xhtml
+                            node.setAttribute('type', 'xhtml')
+                            node.removeChild(node.childNodes[0])
+                            node.appendChild(data.documentElement)
+
+                    except ExpatError:
+                        # leave as html
+                        pass
+
+            else:
+                # recurse
+                retype(node)
+
+    if parent.nodeName == 'entry':
+        relativize(parent)
+
+if __name__ == '__main__':
+
+    # run styler on each file mentioned on the command line
+    import sys
+    for feed in sys.argv[1:]:
+        doc = minidom.parse(feed)
+        doc.normalize()
+        retype(doc.documentElement)
+        open(feed, 'w').write(doc.toxml('utf-8'))
diff --git a/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet/cache.py b/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet/cache.py
new file mode 100755
index 0000000..dfc529b
--- /dev/null
+++ b/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet/cache.py
@@ -0,0 +1,306 @@
+#!/usr/bin/env python
+# -*- coding: UTF-8 -*-
+"""Item cache.
+
+Between runs of Planet we need somewhere to store the feed information
+we parsed, this is so we don't lose information when a particular feed
+goes away or is too short to hold enough items.
+
+This module provides the code to handle this cache transparently enough
+that the rest of the code can take the persistence for granted.
+"""
+
+import os
+import re
+
+
+# Regular expressions to sanitise cache filenames
+re_url_scheme = re.compile(r'^[^:]*://')
+re_slash = re.compile(r'[?/]+')
+re_initial_cruft = re.compile(r'^[,.]*')
+re_final_cruft = re.compile(r'[,.]*$')
+
+
+class CachedInfo:
+    """Cached information.
+
+    This class is designed to hold information that is stored in a cache
+    between instances.  It can act both as a dictionary (c['foo']) and
+    as an object (c.foo) to get and set values and supports both string
+    and date values.
+
+    If you wish to support special fields you can derive a class off this
+    and implement get_FIELD and set_FIELD functions which will be
+    automatically called.
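+
+    For example, a (hypothetical) subclass declaring
+
+        def get_date(self, key): ...
+
+    would have that method called for both c.date and c['date'].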
+ """ + STRING = "string" + DATE = "date" + NULL = "null" + + def __init__(self, cache, id_, root=0): + self._type = {} + self._value = {} + self._cached = {} + + self._cache = cache + self._id = id_.replace(" ", "%20") + self._root = root + + def cache_key(self, key): + """Return the cache key name for the given key.""" + key = key.replace(" ", "_") + if self._root: + return key + else: + return self._id + " " + key + + def cache_read(self): + """Read information from the cache.""" + if self._root: + keys_key = " keys" + else: + keys_key = self._id + + if self._cache.has_key(keys_key): + keys = self._cache[keys_key].split(" ") + else: + return + + for key in keys: + cache_key = self.cache_key(key) + if not self._cached.has_key(key) or self._cached[key]: + # Key either hasn't been loaded, or is one for the cache + self._value[key] = self._cache[cache_key] + self._type[key] = self._cache[cache_key + " type"] + self._cached[key] = 1 + + def cache_write(self, sync=1): + """Write information to the cache.""" + self.cache_clear(sync=0) + + keys = [] + for key in self.keys(): + cache_key = self.cache_key(key) + if not self._cached[key]: + if self._cache.has_key(cache_key): + # Non-cached keys need to be cleared + del(self._cache[cache_key]) + del(self._cache[cache_key + " type"]) + continue + + keys.append(key) + self._cache[cache_key] = self._value[key] + self._cache[cache_key + " type"] = self._type[key] + + if self._root: + keys_key = " keys" + else: + keys_key = self._id + + self._cache[keys_key] = " ".join(keys) + if sync: + self._cache.sync() + + def cache_clear(self, sync=1): + """Remove information from the cache.""" + if self._root: + keys_key = " keys" + else: + keys_key = self._id + + if self._cache.has_key(keys_key): + keys = self._cache[keys_key].split(" ") + del(self._cache[keys_key]) + else: + return + + for key in keys: + cache_key = self.cache_key(key) + del(self._cache[cache_key]) + del(self._cache[cache_key + " type"]) + + if sync: + self._cache.sync() + + def has_key(self, key): + """Check whether the key exists.""" + key = key.replace(" ", "_") + return self._value.has_key(key) + + def key_type(self, key): + """Return the key type.""" + key = key.replace(" ", "_") + return self._type[key] + + def set(self, key, value, cached=1): + """Set the value of the given key. + + If a set_KEY function exists that is called otherwise the + string function is called and the date function if that fails + (it nearly always will). + """ + key = key.replace(" ", "_") + + try: + func = getattr(self, "set_" + key) + except AttributeError: + pass + else: + return func(key, value) + + if value == None: + return self.set_as_null(key, value) + else: + try: + return self.set_as_string(key, value) + except TypeError: + return self.set_as_date(key, value) + + def get(self, key): + """Return the value of the given key. + + If a get_KEY function exists that is called otherwise the + correctly typed function is called if that exists. + """ + key = key.replace(" ", "_") + + try: + func = getattr(self, "get_" + key) + except AttributeError: + pass + else: + return func(key) + + try: + func = getattr(self, "get_as_" + self._type[key]) + except AttributeError: + pass + else: + return func(key) + + return self._value[key] + + def set_as_string(self, key, value, cached=1): + """Set the key to the string value. + + The value is converted to UTF-8 if it is a Unicode string, otherwise + it's assumed to have failed decoding (feedparser tries pretty hard) + so has all non-ASCII characters stripped. 
+ """ + value = utf8(value) + + key = key.replace(" ", "_") + self._value[key] = value + self._type[key] = self.STRING + self._cached[key] = cached + + def get_as_string(self, key): + """Return the key as a string value.""" + key = key.replace(" ", "_") + if not self.has_key(key): + raise KeyError, key + + return self._value[key] + + def set_as_date(self, key, value, cached=1): + """Set the key to the date value. + + The date should be a 9-item tuple as returned by time.gmtime(). + """ + value = " ".join([ str(s) for s in value ]) + + key = key.replace(" ", "_") + self._value[key] = value + self._type[key] = self.DATE + self._cached[key] = cached + + def get_as_date(self, key): + """Return the key as a date value.""" + key = key.replace(" ", "_") + if not self.has_key(key): + raise KeyError, key + + value = self._value[key] + return tuple([ int(i) for i in value.split(" ") ]) + + def set_as_null(self, key, value, cached=1): + """Set the key to the null value. + + This only exists to make things less magic. + """ + key = key.replace(" ", "_") + self._value[key] = "" + self._type[key] = self.NULL + self._cached[key] = cached + + def get_as_null(self, key): + """Return the key as the null value.""" + key = key.replace(" ", "_") + if not self.has_key(key): + raise KeyError, key + + return None + + def del_key(self, key): + """Delete the given key.""" + key = key.replace(" ", "_") + if not self.has_key(key): + raise KeyError, key + + del(self._value[key]) + del(self._type[key]) + del(self._cached[key]) + + def keys(self): + """Return the list of cached keys.""" + return self._value.keys() + + def __iter__(self): + """Iterate the cached keys.""" + return iter(self._value.keys()) + + # Special methods + __contains__ = has_key + __setitem__ = set_as_string + __getitem__ = get + __delitem__ = del_key + __delattr__ = del_key + + def __setattr__(self, key, value): + if key.startswith("_"): + self.__dict__[key] = value + else: + self.set(key, value) + + def __getattr__(self, key): + if self.has_key(key): + return self.get(key) + else: + raise AttributeError, key + + +def filename(directory, filename): + """Return a filename suitable for the cache. + + Strips dangerous and common characters to create a filename we + can use to store the cache in. + """ + filename = re_url_scheme.sub("", filename) + filename = re_slash.sub(",", filename) + filename = re_initial_cruft.sub("", filename) + filename = re_final_cruft.sub("", filename) + + return os.path.join(directory, filename) + +def utf8(value): + """Return the value as a UTF-8 string.""" + if type(value) == type(u''): + return value.encode("utf-8") + else: + try: + return unicode(value, "utf-8").encode("utf-8") + except UnicodeError: + try: + return unicode(value, "iso-8859-1").encode("utf-8") + except UnicodeError: + return unicode(value, "ascii", "replace").encode("utf-8") diff --git a/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet/compat_logging/__init__.py b/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet/compat_logging/__init__.py new file mode 100755 index 0000000..3bd0c6d --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet/compat_logging/__init__.py @@ -0,0 +1,1196 @@ +# Copyright 2001-2002 by Vinay Sajip. All Rights Reserved. 
+# +# Permission to use, copy, modify, and distribute this software and its +# documentation for any purpose and without fee is hereby granted, +# provided that the above copyright notice appear in all copies and that +# both that copyright notice and this permission notice appear in +# supporting documentation, and that the name of Vinay Sajip +# not be used in advertising or publicity pertaining to distribution +# of the software without specific, written prior permission. +# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL +# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER +# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +""" +Logging package for Python. Based on PEP 282 and comments thereto in +comp.lang.python, and influenced by Apache's log4j system. + +Should work under Python versions >= 1.5.2, except that source line +information is not available unless 'sys._getframe()' is. + +Copyright (C) 2001-2002 Vinay Sajip. All Rights Reserved. + +To use, simply 'import logging' and log away! +""" + +import sys, os, types, time, string, cStringIO + +try: + import thread + import threading +except ImportError: + thread = None + +__author__ = "Vinay Sajip " +__status__ = "beta" +__version__ = "0.4.8.1" +__date__ = "26 June 2003" + +#--------------------------------------------------------------------------- +# Miscellaneous module data +#--------------------------------------------------------------------------- + +# +#_srcfile is used when walking the stack to check when we've got the first +# caller stack frame. +# +if string.lower(__file__[-4:]) in ['.pyc', '.pyo']: + _srcfile = __file__[:-4] + '.py' +else: + _srcfile = __file__ +_srcfile = os.path.normcase(_srcfile) + +# _srcfile is only used in conjunction with sys._getframe(). +# To provide compatibility with older versions of Python, set _srcfile +# to None if _getframe() is not available; this value will prevent +# findCaller() from being called. +if not hasattr(sys, "_getframe"): + _srcfile = None + +# +#_startTime is used as the base when calculating the relative time of events +# +_startTime = time.time() + +# +#raiseExceptions is used to see if exceptions during handling should be +#propagated +# +raiseExceptions = 1 + +#--------------------------------------------------------------------------- +# Level related stuff +#--------------------------------------------------------------------------- +# +# Default levels and level names, these can be replaced with any positive set +# of values having corresponding names. There is a pseudo-level, NOTSET, which +# is only really there as a lower limit for user-defined levels. Handlers and +# loggers are initialized with NOTSET so that they will log all messages, even +# at user-defined levels. +# +CRITICAL = 50 +FATAL = CRITICAL +ERROR = 40 +WARNING = 30 +WARN = WARNING +INFO = 20 +DEBUG = 10 +NOTSET = 0 + +_levelNames = { + CRITICAL : 'CRITICAL', + ERROR : 'ERROR', + WARNING : 'WARNING', + INFO : 'INFO', + DEBUG : 'DEBUG', + NOTSET : 'NOTSET', + 'CRITICAL' : CRITICAL, + 'ERROR' : ERROR, + 'WARN' : WARNING, + 'WARNING' : WARNING, + 'INFO' : INFO, + 'DEBUG' : DEBUG, + 'NOTSET' : NOTSET, +} + +def getLevelName(level): + """ + Return the textual representation of logging level 'level'. 
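+
+    For example, getLevelName(ERROR) returns 'ERROR'; because _levelNames
+    maps in both directions, getLevelName('ERROR') returns the integer 40.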
+ + If the level is one of the predefined levels (CRITICAL, ERROR, WARNING, + INFO, DEBUG) then you get the corresponding string. If you have + associated levels with names using addLevelName then the name you have + associated with 'level' is returned. Otherwise, the string + "Level %s" % level is returned. + """ + return _levelNames.get(level, ("Level %s" % level)) + +def addLevelName(level, levelName): + """ + Associate 'levelName' with 'level'. + + This is used when converting levels to text during message formatting. + """ + _acquireLock() + try: #unlikely to cause an exception, but you never know... + _levelNames[level] = levelName + _levelNames[levelName] = level + finally: + _releaseLock() + +#--------------------------------------------------------------------------- +# Thread-related stuff +#--------------------------------------------------------------------------- + +# +#_lock is used to serialize access to shared data structures in this module. +#This needs to be an RLock because fileConfig() creates Handlers and so +#might arbitrary user threads. Since Handler.__init__() updates the shared +#dictionary _handlers, it needs to acquire the lock. But if configuring, +#the lock would already have been acquired - so we need an RLock. +#The same argument applies to Loggers and Manager.loggerDict. +# +_lock = None + +def _acquireLock(): + """ + Acquire the module-level lock for serializing access to shared data. + + This should be released with _releaseLock(). + """ + global _lock + if (not _lock) and thread: + _lock = threading.RLock() + if _lock: + _lock.acquire() + +def _releaseLock(): + """ + Release the module-level lock acquired by calling _acquireLock(). + """ + if _lock: + _lock.release() + +#--------------------------------------------------------------------------- +# The logging record +#--------------------------------------------------------------------------- + +class LogRecord: + """ + A LogRecord instance represents an event being logged. + + LogRecord instances are created every time something is logged. They + contain all the information pertinent to the event being logged. The + main information passed in is in msg and args, which are combined + using str(msg) % args to create the message field of the record. The + record also includes information such as when the record was created, + the source line where the logging call was made, and any exception + information to be logged. + """ + def __init__(self, name, level, pathname, lineno, msg, args, exc_info): + """ + Initialize a logging record with interesting information. + """ + ct = time.time() + self.name = name + self.msg = msg + self.args = args + self.levelname = getLevelName(level) + self.levelno = level + self.pathname = pathname + try: + self.filename = os.path.basename(pathname) + self.module = os.path.splitext(self.filename)[0] + except: + self.filename = pathname + self.module = "Unknown module" + self.exc_info = exc_info + self.lineno = lineno + self.created = ct + self.msecs = (ct - long(ct)) * 1000 + self.relativeCreated = (self.created - _startTime) * 1000 + if thread: + self.thread = thread.get_ident() + else: + self.thread = None + if hasattr(os, 'getpid'): + self.process = os.getpid() + else: + self.process = None + + def __str__(self): + return ''%(self.name, self.levelno, + self.pathname, self.lineno, self.msg) + + def getMessage(self): + """ + Return the message for this LogRecord. + + Return the message for this LogRecord after merging any user-supplied + arguments with the message. 
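+
+        For example, a record created with msg="%d of %d" and args=(5, 10)
+        yields "5 of 10".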
+ """ + if not hasattr(types, "UnicodeType"): #if no unicode support... + msg = str(self.msg) + else: + try: + msg = str(self.msg) + except UnicodeError: + msg = self.msg #Defer encoding till later + if self.args: + msg = msg % self.args + return msg + +def makeLogRecord(dict): + """ + Make a LogRecord whose attributes are defined by the specified dictionary, + This function is useful for converting a logging event received over + a socket connection (which is sent as a dictionary) into a LogRecord + instance. + """ + rv = LogRecord(None, None, "", 0, "", (), None) + rv.__dict__.update(dict) + return rv + +#--------------------------------------------------------------------------- +# Formatter classes and functions +#--------------------------------------------------------------------------- + +class Formatter: + """ + Formatter instances are used to convert a LogRecord to text. + + Formatters need to know how a LogRecord is constructed. They are + responsible for converting a LogRecord to (usually) a string which can + be interpreted by either a human or an external system. The base Formatter + allows a formatting string to be specified. If none is supplied, the + default value of "%s(message)\\n" is used. + + The Formatter can be initialized with a format string which makes use of + knowledge of the LogRecord attributes - e.g. the default value mentioned + above makes use of the fact that the user's message and arguments are pre- + formatted into a LogRecord's message attribute. Currently, the useful + attributes in a LogRecord are described by: + + %(name)s Name of the logger (logging channel) + %(levelno)s Numeric logging level for the message (DEBUG, INFO, + WARNING, ERROR, CRITICAL) + %(levelname)s Text logging level for the message ("DEBUG", "INFO", + "WARNING", "ERROR", "CRITICAL") + %(pathname)s Full pathname of the source file where the logging + call was issued (if available) + %(filename)s Filename portion of pathname + %(module)s Module (name portion of filename) + %(lineno)d Source line number where the logging call was issued + (if available) + %(created)f Time when the LogRecord was created (time.time() + return value) + %(asctime)s Textual time when the LogRecord was created + %(msecs)d Millisecond portion of the creation time + %(relativeCreated)d Time in milliseconds when the LogRecord was created, + relative to the time the logging module was loaded + (typically at application startup time) + %(thread)d Thread ID (if available) + %(process)d Process ID (if available) + %(message)s The result of record.getMessage(), computed just as + the record is emitted + """ + + converter = time.localtime + + def __init__(self, fmt=None, datefmt=None): + """ + Initialize the formatter with specified format strings. + + Initialize the formatter either with the specified format string, or a + default as described above. Allow for specialized date formatting with + the optional datefmt argument (if omitted, you get the ISO8601 format). + """ + if fmt: + self._fmt = fmt + else: + self._fmt = "%(message)s" + self.datefmt = datefmt + + def formatTime(self, record, datefmt=None): + """ + Return the creation time of the specified LogRecord as formatted text. + + This method should be called from format() by a formatter which + wants to make use of a formatted time. 
This method can be overridden + in formatters to provide for any specific requirement, but the + basic behaviour is as follows: if datefmt (a string) is specified, + it is used with time.strftime() to format the creation time of the + record. Otherwise, the ISO8601 format is used. The resulting + string is returned. This function uses a user-configurable function + to convert the creation time to a tuple. By default, time.localtime() + is used; to change this for a particular formatter instance, set the + 'converter' attribute to a function with the same signature as + time.localtime() or time.gmtime(). To change it for all formatters, + for example if you want all logging times to be shown in GMT, + set the 'converter' attribute in the Formatter class. + """ + ct = self.converter(record.created) + if datefmt: + s = time.strftime(datefmt, ct) + else: + t = time.strftime("%Y-%m-%d %H:%M:%S", ct) + s = "%s,%03d" % (t, record.msecs) + return s + + def formatException(self, ei): + """ + Format and return the specified exception information as a string. + + This default implementation just uses + traceback.print_exception() + """ + import traceback + sio = cStringIO.StringIO() + traceback.print_exception(ei[0], ei[1], ei[2], None, sio) + s = sio.getvalue() + sio.close() + if s[-1] == "\n": + s = s[:-1] + return s + + def format(self, record): + """ + Format the specified record as text. + + The record's attribute dictionary is used as the operand to a + string formatting operation which yields the returned string. + Before formatting the dictionary, a couple of preparatory steps + are carried out. The message attribute of the record is computed + using LogRecord.getMessage(). If the formatting string contains + "%(asctime)", formatTime() is called to format the event time. + If there is exception information, it is formatted using + formatException() and appended to the message. + """ + record.message = record.getMessage() + if string.find(self._fmt,"%(asctime)") >= 0: + record.asctime = self.formatTime(record, self.datefmt) + s = self._fmt % record.__dict__ + if record.exc_info: + if s[-1] != "\n": + s = s + "\n" + s = s + self.formatException(record.exc_info) + return s + +# +# The default formatter to use when no other is specified +# +_defaultFormatter = Formatter() + +class BufferingFormatter: + """ + A formatter suitable for formatting a number of records. + """ + def __init__(self, linefmt=None): + """ + Optionally specify a formatter which will be used to format each + individual record. + """ + if linefmt: + self.linefmt = linefmt + else: + self.linefmt = _defaultFormatter + + def formatHeader(self, records): + """ + Return the header string for the specified records. + """ + return "" + + def formatFooter(self, records): + """ + Return the footer string for the specified records. + """ + return "" + + def format(self, records): + """ + Format the specified records and return the result as a string. + """ + rv = "" + if len(records) > 0: + rv = rv + self.formatHeader(records) + for record in records: + rv = rv + self.linefmt.format(record) + rv = rv + self.formatFooter(records) + return rv + +#--------------------------------------------------------------------------- +# Filter classes and functions +#--------------------------------------------------------------------------- + +class Filter: + """ + Filter instances are used to perform arbitrary filtering of LogRecords. + + Loggers and Handlers can optionally use Filter instances to filter + records as desired. 
The base filter class only allows events which are + below a certain point in the logger hierarchy. For example, a filter + initialized with "A.B" will allow events logged by loggers "A.B", + "A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If + initialized with the empty string, all events are passed. + """ + def __init__(self, name=''): + """ + Initialize a filter. + + Initialize with the name of the logger which, together with its + children, will have its events allowed through the filter. If no + name is specified, allow every event. + """ + self.name = name + self.nlen = len(name) + + def filter(self, record): + """ + Determine if the specified record is to be logged. + + Is the specified record to be logged? Returns 0 for no, nonzero for + yes. If deemed appropriate, the record may be modified in-place. + """ + if self.nlen == 0: + return 1 + elif self.name == record.name: + return 1 + elif string.find(record.name, self.name, 0, self.nlen) != 0: + return 0 + return (record.name[self.nlen] == ".") + +class Filterer: + """ + A base class for loggers and handlers which allows them to share + common code. + """ + def __init__(self): + """ + Initialize the list of filters to be an empty list. + """ + self.filters = [] + + def addFilter(self, filter): + """ + Add the specified filter to this handler. + """ + if not (filter in self.filters): + self.filters.append(filter) + + def removeFilter(self, filter): + """ + Remove the specified filter from this handler. + """ + if filter in self.filters: + self.filters.remove(filter) + + def filter(self, record): + """ + Determine if a record is loggable by consulting all the filters. + + The default is to allow the record to be logged; any filter can veto + this and the record is then dropped. Returns a zero value if a record + is to be dropped, else non-zero. + """ + rv = 1 + for f in self.filters: + if not f.filter(record): + rv = 0 + break + return rv + +#--------------------------------------------------------------------------- +# Handler classes and functions +#--------------------------------------------------------------------------- + +_handlers = {} #repository of handlers (for flushing when shutdown called) + +class Handler(Filterer): + """ + Handler instances dispatch logging events to specific destinations. + + The base handler class. Acts as a placeholder which defines the Handler + interface. Handlers can optionally use Formatter instances to format + records as desired. By default, no formatter is specified; in this case, + the 'raw' message as determined by record.message is logged. + """ + def __init__(self, level=NOTSET): + """ + Initializes the instance - basically setting the formatter to None + and the filter list to empty. + """ + Filterer.__init__(self) + self.level = level + self.formatter = None + #get the module data lock, as we're updating a shared structure. + _acquireLock() + try: #unlikely to raise an exception, but you never know... + _handlers[self] = 1 + finally: + _releaseLock() + self.createLock() + + def createLock(self): + """ + Acquire a thread lock for serializing access to the underlying I/O. + """ + if thread: + self.lock = thread.allocate_lock() + else: + self.lock = None + + def acquire(self): + """ + Acquire the I/O thread lock. + """ + if self.lock: + self.lock.acquire() + + def release(self): + """ + Release the I/O thread lock. + """ + if self.lock: + self.lock.release() + + def setLevel(self, level): + """ + Set the logging level of this handler. 
+ """ + self.level = level + + def format(self, record): + """ + Format the specified record. + + If a formatter is set, use it. Otherwise, use the default formatter + for the module. + """ + if self.formatter: + fmt = self.formatter + else: + fmt = _defaultFormatter + return fmt.format(record) + + def emit(self, record): + """ + Do whatever it takes to actually log the specified logging record. + + This version is intended to be implemented by subclasses and so + raises a NotImplementedError. + """ + raise NotImplementedError, 'emit must be implemented '\ + 'by Handler subclasses' + + def handle(self, record): + """ + Conditionally emit the specified logging record. + + Emission depends on filters which may have been added to the handler. + Wrap the actual emission of the record with acquisition/release of + the I/O thread lock. Returns whether the filter passed the record for + emission. + """ + rv = self.filter(record) + if rv: + self.acquire() + try: + self.emit(record) + finally: + self.release() + return rv + + def setFormatter(self, fmt): + """ + Set the formatter for this handler. + """ + self.formatter = fmt + + def flush(self): + """ + Ensure all logging output has been flushed. + + This version does nothing and is intended to be implemented by + subclasses. + """ + pass + + def close(self): + """ + Tidy up any resources used by the handler. + + This version does nothing and is intended to be implemented by + subclasses. + """ + pass + + def handleError(self, record): + """ + Handle errors which occur during an emit() call. + + This method should be called from handlers when an exception is + encountered during an emit() call. If raiseExceptions is false, + exceptions get silently ignored. This is what is mostly wanted + for a logging system - most users will not care about errors in + the logging system, they are more interested in application errors. + You could, however, replace this with a custom handler if you wish. + The record which was being processed is passed in to this method. + """ + if raiseExceptions: + import traceback + ei = sys.exc_info() + traceback.print_exception(ei[0], ei[1], ei[2], None, sys.stderr) + del ei + +class StreamHandler(Handler): + """ + A handler class which writes logging records, appropriately formatted, + to a stream. Note that this class does not close the stream, as + sys.stdout or sys.stderr may be used. + """ + def __init__(self, strm=None): + """ + Initialize the handler. + + If strm is not specified, sys.stderr is used. + """ + Handler.__init__(self) + if not strm: + strm = sys.stderr + self.stream = strm + self.formatter = None + + def flush(self): + """ + Flushes the stream. + """ + self.stream.flush() + + def emit(self, record): + """ + Emit a record. + + If a formatter is specified, it is used to format the record. + The record is then written to the stream with a trailing newline + [N.B. this may be removed depending on feedback]. If exception + information is present, it is formatted using + traceback.print_exception and appended to the stream. + """ + try: + msg = self.format(record) + if not hasattr(types, "UnicodeType"): #if no unicode support... + self.stream.write("%s\n" % msg) + else: + try: + self.stream.write("%s\n" % msg) + except UnicodeError: + self.stream.write("%s\n" % msg.encode("UTF-8")) + self.flush() + except: + self.handleError(record) + +class FileHandler(StreamHandler): + """ + A handler class which writes formatted logging records to disk files. 
+ """ + def __init__(self, filename, mode="a"): + """ + Open the specified file and use it as the stream for logging. + """ + StreamHandler.__init__(self, open(filename, mode)) + self.baseFilename = filename + self.mode = mode + + def close(self): + """ + Closes the stream. + """ + self.stream.close() + +#--------------------------------------------------------------------------- +# Manager classes and functions +#--------------------------------------------------------------------------- + +class PlaceHolder: + """ + PlaceHolder instances are used in the Manager logger hierarchy to take + the place of nodes for which no loggers have been defined [FIXME add + example]. + """ + def __init__(self, alogger): + """ + Initialize with the specified logger being a child of this placeholder. + """ + self.loggers = [alogger] + + def append(self, alogger): + """ + Add the specified logger as a child of this placeholder. + """ + if alogger not in self.loggers: + self.loggers.append(alogger) + +# +# Determine which class to use when instantiating loggers. +# +_loggerClass = None + +def setLoggerClass(klass): + """ + Set the class to be used when instantiating a logger. The class should + define __init__() such that only a name argument is required, and the + __init__() should call Logger.__init__() + """ + if klass != Logger: + if not issubclass(klass, Logger): + raise TypeError, "logger not derived from logging.Logger: " + \ + klass.__name__ + global _loggerClass + _loggerClass = klass + +class Manager: + """ + There is [under normal circumstances] just one Manager instance, which + holds the hierarchy of loggers. + """ + def __init__(self, rootnode): + """ + Initialize the manager with the root node of the logger hierarchy. + """ + self.root = rootnode + self.disable = 0 + self.emittedNoHandlerWarning = 0 + self.loggerDict = {} + + def getLogger(self, name): + """ + Get a logger with the specified name (channel name), creating it + if it doesn't yet exist. + + If a PlaceHolder existed for the specified name [i.e. the logger + didn't exist but a child of it did], replace it with the created + logger and fix up the parent/child references which pointed to the + placeholder to now point to the logger. + """ + rv = None + _acquireLock() + try: + if self.loggerDict.has_key(name): + rv = self.loggerDict[name] + if isinstance(rv, PlaceHolder): + ph = rv + rv = _loggerClass(name) + rv.manager = self + self.loggerDict[name] = rv + self._fixupChildren(ph, rv) + self._fixupParents(rv) + else: + rv = _loggerClass(name) + rv.manager = self + self.loggerDict[name] = rv + self._fixupParents(rv) + finally: + _releaseLock() + return rv + + def _fixupParents(self, alogger): + """ + Ensure that there are either loggers or placeholders all the way + from the specified logger to the root of the logger hierarchy. + """ + name = alogger.name + i = string.rfind(name, ".") + rv = None + while (i > 0) and not rv: + substr = name[:i] + if not self.loggerDict.has_key(substr): + self.loggerDict[substr] = PlaceHolder(alogger) + else: + obj = self.loggerDict[substr] + if isinstance(obj, Logger): + rv = obj + else: + assert isinstance(obj, PlaceHolder) + obj.append(alogger) + i = string.rfind(name, ".", 0, i - 1) + if not rv: + rv = self.root + alogger.parent = rv + + def _fixupChildren(self, ph, alogger): + """ + Ensure that children of the placeholder ph are connected to the + specified logger. 
+ """ + for c in ph.loggers: + if string.find(c.parent.name, alogger.name) <> 0: + alogger.parent = c.parent + c.parent = alogger + +#--------------------------------------------------------------------------- +# Logger classes and functions +#--------------------------------------------------------------------------- + +class Logger(Filterer): + """ + Instances of the Logger class represent a single logging channel. A + "logging channel" indicates an area of an application. Exactly how an + "area" is defined is up to the application developer. Since an + application can have any number of areas, logging channels are identified + by a unique string. Application areas can be nested (e.g. an area + of "input processing" might include sub-areas "read CSV files", "read + XLS files" and "read Gnumeric files"). To cater for this natural nesting, + channel names are organized into a namespace hierarchy where levels are + separated by periods, much like the Java or Python package namespace. So + in the instance given above, channel names might be "input" for the upper + level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels. + There is no arbitrary limit to the depth of nesting. + """ + def __init__(self, name, level=NOTSET): + """ + Initialize the logger with a name and an optional level. + """ + Filterer.__init__(self) + self.name = name + self.level = level + self.parent = None + self.propagate = 1 + self.handlers = [] + self.disabled = 0 + + def setLevel(self, level): + """ + Set the logging level of this logger. + """ + self.level = level + +# def getRoot(self): +# """ +# Get the root of the logger hierarchy. +# """ +# return Logger.root + + def debug(self, msg, *args, **kwargs): + """ + Log 'msg % args' with severity 'DEBUG'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.debug("Houston, we have a %s", "thorny problem", exc_info=1) + """ + if self.manager.disable >= DEBUG: + return + if DEBUG >= self.getEffectiveLevel(): + apply(self._log, (DEBUG, msg, args), kwargs) + + def info(self, msg, *args, **kwargs): + """ + Log 'msg % args' with severity 'INFO'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.info("Houston, we have a %s", "interesting problem", exc_info=1) + """ + if self.manager.disable >= INFO: + return + if INFO >= self.getEffectiveLevel(): + apply(self._log, (INFO, msg, args), kwargs) + + def warning(self, msg, *args, **kwargs): + """ + Log 'msg % args' with severity 'WARNING'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1) + """ + if self.manager.disable >= WARNING: + return + if self.isEnabledFor(WARNING): + apply(self._log, (WARNING, msg, args), kwargs) + + warn = warning + + def error(self, msg, *args, **kwargs): + """ + Log 'msg % args' with severity 'ERROR'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.error("Houston, we have a %s", "major problem", exc_info=1) + """ + if self.manager.disable >= ERROR: + return + if self.isEnabledFor(ERROR): + apply(self._log, (ERROR, msg, args), kwargs) + + def exception(self, msg, *args): + """ + Convenience method for logging an ERROR with exception information. + """ + apply(self.error, (msg,) + args, {'exc_info': 1}) + + def critical(self, msg, *args, **kwargs): + """ + Log 'msg % args' with severity 'CRITICAL'. 
+ + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.critical("Houston, we have a %s", "major disaster", exc_info=1) + """ + if self.manager.disable >= CRITICAL: + return + if CRITICAL >= self.getEffectiveLevel(): + apply(self._log, (CRITICAL, msg, args), kwargs) + + fatal = critical + + def log(self, level, msg, *args, **kwargs): + """ + Log 'msg % args' with the severity 'level'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.log(level, "We have a %s", "mysterious problem", exc_info=1) + """ + if self.manager.disable >= level: + return + if self.isEnabledFor(level): + apply(self._log, (level, msg, args), kwargs) + + def findCaller(self): + """ + Find the stack frame of the caller so that we can note the source + file name and line number. + """ + f = sys._getframe(1) + while 1: + co = f.f_code + filename = os.path.normcase(co.co_filename) + if filename == _srcfile: + f = f.f_back + continue + return filename, f.f_lineno + + def makeRecord(self, name, level, fn, lno, msg, args, exc_info): + """ + A factory method which can be overridden in subclasses to create + specialized LogRecords. + """ + return LogRecord(name, level, fn, lno, msg, args, exc_info) + + def _log(self, level, msg, args, exc_info=None): + """ + Low-level logging routine which creates a LogRecord and then calls + all the handlers of this logger to handle the record. + """ + if _srcfile: + fn, lno = self.findCaller() + else: + fn, lno = "", 0 + if exc_info: + exc_info = sys.exc_info() + record = self.makeRecord(self.name, level, fn, lno, msg, args, exc_info) + self.handle(record) + + def handle(self, record): + """ + Call the handlers for the specified record. + + This method is used for unpickled records received from a socket, as + well as those created locally. Logger-level filtering is applied. + """ + if (not self.disabled) and self.filter(record): + self.callHandlers(record) + + def addHandler(self, hdlr): + """ + Add the specified handler to this logger. + """ + if not (hdlr in self.handlers): + self.handlers.append(hdlr) + + def removeHandler(self, hdlr): + """ + Remove the specified handler from this logger. + """ + if hdlr in self.handlers: + #hdlr.close() + self.handlers.remove(hdlr) + + def callHandlers(self, record): + """ + Pass a record to all relevant handlers. + + Loop through all handlers for this logger and its parents in the + logger hierarchy. If no handler was found, output a one-off error + message to sys.stderr. Stop searching up the hierarchy whenever a + logger with the "propagate" attribute set to zero is found - that + will be the last logger whose handlers are called. + """ + c = self + found = 0 + while c: + for hdlr in c.handlers: + found = found + 1 + if record.levelno >= hdlr.level: + hdlr.handle(record) + if not c.propagate: + c = None #break out + else: + c = c.parent + if (found == 0) and not self.manager.emittedNoHandlerWarning: + sys.stderr.write("No handlers could be found for logger" + " \"%s\"\n" % self.name) + self.manager.emittedNoHandlerWarning = 1 + + def getEffectiveLevel(self): + """ + Get the effective level for this logger. + + Loop through this logger and its parents in the logger hierarchy, + looking for a non-zero logging level. Return the first one found. + """ + logger = self + while logger: + if logger.level: + return logger.level + logger = logger.parent + return NOTSET + + def isEnabledFor(self, level): + """ + Is this logger enabled for level 'level'? 
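+
+        For example, with manager.disable at 0 and an effective level of
+        WARNING, isEnabledFor(ERROR) is true while isEnabledFor(DEBUG) is not.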
+ """ + if self.manager.disable >= level: + return 0 + return level >= self.getEffectiveLevel() + +class RootLogger(Logger): + """ + A root logger is not that different to any other logger, except that + it must have a logging level and there is only one instance of it in + the hierarchy. + """ + def __init__(self, level): + """ + Initialize the logger with the name "root". + """ + Logger.__init__(self, "root", level) + +_loggerClass = Logger + +root = RootLogger(WARNING) +Logger.root = root +Logger.manager = Manager(Logger.root) + +#--------------------------------------------------------------------------- +# Configuration classes and functions +#--------------------------------------------------------------------------- + +BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s" + +def basicConfig(): + """ + Do basic configuration for the logging system by creating a + StreamHandler with a default Formatter and adding it to the + root logger. + """ + if len(root.handlers) == 0: + hdlr = StreamHandler() + fmt = Formatter(BASIC_FORMAT) + hdlr.setFormatter(fmt) + root.addHandler(hdlr) + +#--------------------------------------------------------------------------- +# Utility functions at module level. +# Basically delegate everything to the root logger. +#--------------------------------------------------------------------------- + +def getLogger(name=None): + """ + Return a logger with the specified name, creating it if necessary. + + If no name is specified, return the root logger. + """ + if name: + return Logger.manager.getLogger(name) + else: + return root + +#def getRootLogger(): +# """ +# Return the root logger. +# +# Note that getLogger('') now does the same thing, so this function is +# deprecated and may disappear in the future. +# """ +# return root + +def critical(msg, *args, **kwargs): + """ + Log a message with severity 'CRITICAL' on the root logger. + """ + if len(root.handlers) == 0: + basicConfig() + apply(root.critical, (msg,)+args, kwargs) + +fatal = critical + +def error(msg, *args, **kwargs): + """ + Log a message with severity 'ERROR' on the root logger. + """ + if len(root.handlers) == 0: + basicConfig() + apply(root.error, (msg,)+args, kwargs) + +def exception(msg, *args): + """ + Log a message with severity 'ERROR' on the root logger, + with exception information. + """ + apply(error, (msg,)+args, {'exc_info': 1}) + +def warning(msg, *args, **kwargs): + """ + Log a message with severity 'WARNING' on the root logger. + """ + if len(root.handlers) == 0: + basicConfig() + apply(root.warning, (msg,)+args, kwargs) + +warn = warning + +def info(msg, *args, **kwargs): + """ + Log a message with severity 'INFO' on the root logger. + """ + if len(root.handlers) == 0: + basicConfig() + apply(root.info, (msg,)+args, kwargs) + +def debug(msg, *args, **kwargs): + """ + Log a message with severity 'DEBUG' on the root logger. + """ + if len(root.handlers) == 0: + basicConfig() + apply(root.debug, (msg,)+args, kwargs) + +def disable(level): + """ + Disable all logging calls less severe than 'level'. + """ + root.manager.disable = level + +def shutdown(): + """ + Perform any cleanup actions in the logging system (e.g. flushing + buffers). + + Should be called at application exit. 
+ """ + for h in _handlers.keys(): + h.flush() + h.close() diff --git a/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet/compat_logging/config.py b/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet/compat_logging/config.py new file mode 100755 index 0000000..d4d08f0 --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet/compat_logging/config.py @@ -0,0 +1,299 @@ +# Copyright 2001-2002 by Vinay Sajip. All Rights Reserved. +# +# Permission to use, copy, modify, and distribute this software and its +# documentation for any purpose and without fee is hereby granted, +# provided that the above copyright notice appear in all copies and that +# both that copyright notice and this permission notice appear in +# supporting documentation, and that the name of Vinay Sajip +# not be used in advertising or publicity pertaining to distribution +# of the software without specific, written prior permission. +# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL +# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER +# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +""" +Logging package for Python. Based on PEP 282 and comments thereto in +comp.lang.python, and influenced by Apache's log4j system. + +Should work under Python versions >= 1.5.2, except that source line +information is not available unless 'inspect' is. + +Copyright (C) 2001-2002 Vinay Sajip. All Rights Reserved. + +To use, simply 'import logging' and log away! +""" + +import sys, logging, logging.handlers, string, thread, threading, socket, struct, os + +from SocketServer import ThreadingTCPServer, StreamRequestHandler + + +DEFAULT_LOGGING_CONFIG_PORT = 9030 +if sys.platform == "win32": + RESET_ERROR = 10054 #WSAECONNRESET +else: + RESET_ERROR = 104 #ECONNRESET + +# +# The following code implements a socket listener for on-the-fly +# reconfiguration of logging. +# +# _listener holds the server object doing the listening +_listener = None + +def fileConfig(fname, defaults=None): + """ + Read the logging configuration from a ConfigParser-format file. + + This can be called several times from an application, allowing an end user + the ability to select from various pre-canned configurations (if the + developer provides a mechanism to present the choices and load the chosen + configuration). + In versions of ConfigParser which have the readfp method [typically + shipped in 2.x versions of Python], you can pass in a file-like object + rather than a filename, in which case the file-like object will be read + using readfp. + """ + import ConfigParser + + cp = ConfigParser.ConfigParser(defaults) + if hasattr(cp, 'readfp') and hasattr(fname, 'readline'): + cp.readfp(fname) + else: + cp.read(fname) + #first, do the formatters... + flist = cp.get("formatters", "keys") + if len(flist): + flist = string.split(flist, ",") + formatters = {} + for form in flist: + sectname = "formatter_%s" % form + opts = cp.options(sectname) + if "format" in opts: + fs = cp.get(sectname, "format", 1) + else: + fs = None + if "datefmt" in opts: + dfs = cp.get(sectname, "datefmt", 1) + else: + dfs = None + f = logging.Formatter(fs, dfs) + formatters[form] = f + #next, do the handlers... + #critical section... 
+ logging._acquireLock() + try: + try: + #first, lose the existing handlers... + logging._handlers.clear() + #now set up the new ones... + hlist = cp.get("handlers", "keys") + if len(hlist): + hlist = string.split(hlist, ",") + handlers = {} + fixups = [] #for inter-handler references + for hand in hlist: + sectname = "handler_%s" % hand + klass = cp.get(sectname, "class") + opts = cp.options(sectname) + if "formatter" in opts: + fmt = cp.get(sectname, "formatter") + else: + fmt = "" + klass = eval(klass, vars(logging)) + args = cp.get(sectname, "args") + args = eval(args, vars(logging)) + h = apply(klass, args) + if "level" in opts: + level = cp.get(sectname, "level") + h.setLevel(logging._levelNames[level]) + if len(fmt): + h.setFormatter(formatters[fmt]) + #temporary hack for FileHandler and MemoryHandler. + if klass == logging.handlers.MemoryHandler: + if "target" in opts: + target = cp.get(sectname,"target") + else: + target = "" + if len(target): #the target handler may not be loaded yet, so keep for later... + fixups.append((h, target)) + handlers[hand] = h + #now all handlers are loaded, fixup inter-handler references... + for fixup in fixups: + h = fixup[0] + t = fixup[1] + h.setTarget(handlers[t]) + #at last, the loggers...first the root... + llist = cp.get("loggers", "keys") + llist = string.split(llist, ",") + llist.remove("root") + sectname = "logger_root" + root = logging.root + log = root + opts = cp.options(sectname) + if "level" in opts: + level = cp.get(sectname, "level") + log.setLevel(logging._levelNames[level]) + for h in root.handlers[:]: + root.removeHandler(h) + hlist = cp.get(sectname, "handlers") + if len(hlist): + hlist = string.split(hlist, ",") + for hand in hlist: + log.addHandler(handlers[hand]) + #and now the others... + #we don't want to lose the existing loggers, + #since other threads may have pointers to them. + #existing is set to contain all existing loggers, + #and as we go through the new configuration we + #remove any which are configured. At the end, + #what's left in existing is the set of loggers + #which were in the previous configuration but + #which are not in the new configuration. + existing = root.manager.loggerDict.keys() + #now set up the new ones... + for log in llist: + sectname = "logger_%s" % log + qn = cp.get(sectname, "qualname") + opts = cp.options(sectname) + if "propagate" in opts: + propagate = cp.getint(sectname, "propagate") + else: + propagate = 1 + logger = logging.getLogger(qn) + if qn in existing: + existing.remove(qn) + if "level" in opts: + level = cp.get(sectname, "level") + logger.setLevel(logging._levelNames[level]) + for h in logger.handlers[:]: + logger.removeHandler(h) + logger.propagate = propagate + logger.disabled = 0 + hlist = cp.get(sectname, "handlers") + if len(hlist): + hlist = string.split(hlist, ",") + for hand in hlist: + logger.addHandler(handlers[hand]) + #Disable any old loggers. There's no point deleting + #them as other threads may continue to hold references + #and by disabling them, you stop them doing any logging. + for log in existing: + root.manager.loggerDict[log].disabled = 1 + except: + import traceback + ei = sys.exc_info() + traceback.print_exception(ei[0], ei[1], ei[2], None, sys.stderr) + del ei + finally: + logging._releaseLock() + +def listen(port=DEFAULT_LOGGING_CONFIG_PORT): + """ + Start up a socket server on the specified port, and listen for new + configurations. + + These will be sent as a file suitable for processing by fileConfig(). 
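+    A client pushes a configuration as a 4-byte big-endian length followed
+    by the file contents; for example (filename illustrative):
+
+        import socket, struct
+        data = open("logging.ini").read()
+        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        s.connect(("localhost", DEFAULT_LOGGING_CONFIG_PORT))
+        s.send(struct.pack(">L", len(data)) + data)
+        s.close()
+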
+ Returns a Thread object on which you can call start() to start the server, + and which you can join() when appropriate. To stop the server, call + stopListening(). + """ + if not thread: + raise NotImplementedError, "listen() needs threading to work" + + class ConfigStreamHandler(StreamRequestHandler): + """ + Handler for a logging configuration request. + + It expects a completely new logging configuration and uses fileConfig + to install it. + """ + def handle(self): + """ + Handle a request. + + Each request is expected to be a 4-byte length, + followed by the config file. Uses fileConfig() to do the + grunt work. + """ + import tempfile + try: + conn = self.connection + chunk = conn.recv(4) + if len(chunk) == 4: + slen = struct.unpack(">L", chunk)[0] + chunk = self.connection.recv(slen) + while len(chunk) < slen: + chunk = chunk + conn.recv(slen - len(chunk)) + #Apply new configuration. We'd like to be able to + #create a StringIO and pass that in, but unfortunately + #1.5.2 ConfigParser does not support reading file + #objects, only actual files. So we create a temporary + #file and remove it later. + file = tempfile.mktemp(".ini") + f = open(file, "w") + f.write(chunk) + f.close() + fileConfig(file) + os.remove(file) + except socket.error, e: + if type(e.args) != types.TupleType: + raise + else: + errcode = e.args[0] + if errcode != RESET_ERROR: + raise + + class ConfigSocketReceiver(ThreadingTCPServer): + """ + A simple TCP socket-based logging config receiver. + """ + + allow_reuse_address = 1 + + def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT, + handler=None): + ThreadingTCPServer.__init__(self, (host, port), handler) + logging._acquireLock() + self.abort = 0 + logging._releaseLock() + self.timeout = 1 + + def serve_until_stopped(self): + import select + abort = 0 + while not abort: + rd, wr, ex = select.select([self.socket.fileno()], + [], [], + self.timeout) + if rd: + self.handle_request() + logging._acquireLock() + abort = self.abort + logging._releaseLock() + + def serve(rcvr, hdlr, port): + server = rcvr(port=port, handler=hdlr) + global _listener + logging._acquireLock() + _listener = server + logging._releaseLock() + server.serve_until_stopped() + + return threading.Thread(target=serve, + args=(ConfigSocketReceiver, + ConfigStreamHandler, port)) + +def stopListening(): + """ + Stop the listening server which was created with a call to listen(). + """ + global _listener + if _listener: + logging._acquireLock() + _listener.abort = 1 + _listener = None + logging._releaseLock() diff --git a/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet/compat_logging/handlers.py b/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet/compat_logging/handlers.py new file mode 100755 index 0000000..26ca8ad --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet/compat_logging/handlers.py @@ -0,0 +1,728 @@ +# Copyright 2001-2002 by Vinay Sajip. All Rights Reserved. +# +# Permission to use, copy, modify, and distribute this software and its +# documentation for any purpose and without fee is hereby granted, +# provided that the above copyright notice appear in all copies and that +# both that copyright notice and this permission notice appear in +# supporting documentation, and that the name of Vinay Sajip +# not be used in advertising or publicity pertaining to distribution +# of the software without specific, written prior permission. 
+# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL +# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER +# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +""" +Logging package for Python. Based on PEP 282 and comments thereto in +comp.lang.python, and influenced by Apache's log4j system. + +Should work under Python versions >= 1.5.2, except that source line +information is not available unless 'inspect' is. + +Copyright (C) 2001-2002 Vinay Sajip. All Rights Reserved. + +To use, simply 'import logging' and log away! +""" + +import sys, logging, socket, types, os, string, cPickle, struct, time + +from SocketServer import ThreadingTCPServer, StreamRequestHandler + +# +# Some constants... +# + +DEFAULT_TCP_LOGGING_PORT = 9020 +DEFAULT_UDP_LOGGING_PORT = 9021 +DEFAULT_HTTP_LOGGING_PORT = 9022 +DEFAULT_SOAP_LOGGING_PORT = 9023 +SYSLOG_UDP_PORT = 514 + + +class RotatingFileHandler(logging.FileHandler): + def __init__(self, filename, mode="a", maxBytes=0, backupCount=0): + """ + Open the specified file and use it as the stream for logging. + + By default, the file grows indefinitely. You can specify particular + values of maxBytes and backupCount to allow the file to rollover at + a predetermined size. + + Rollover occurs whenever the current log file is nearly maxBytes in + length. If backupCount is >= 1, the system will successively create + new files with the same pathname as the base file, but with extensions + ".1", ".2" etc. appended to it. For example, with a backupCount of 5 + and a base file name of "app.log", you would get "app.log", + "app.log.1", "app.log.2", ... through to "app.log.5". The file being + written to is always "app.log" - when it gets filled up, it is closed + and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc. + exist, then they are renamed to "app.log.2", "app.log.3" etc. + respectively. + + If maxBytes is zero, rollover never occurs. + """ + logging.FileHandler.__init__(self, filename, mode) + self.maxBytes = maxBytes + self.backupCount = backupCount + if maxBytes > 0: + self.mode = "a" + + def doRollover(self): + """ + Do a rollover, as described in __init__(). + """ + + self.stream.close() + if self.backupCount > 0: + for i in range(self.backupCount - 1, 0, -1): + sfn = "%s.%d" % (self.baseFilename, i) + dfn = "%s.%d" % (self.baseFilename, i + 1) + if os.path.exists(sfn): + #print "%s -> %s" % (sfn, dfn) + if os.path.exists(dfn): + os.remove(dfn) + os.rename(sfn, dfn) + dfn = self.baseFilename + ".1" + if os.path.exists(dfn): + os.remove(dfn) + os.rename(self.baseFilename, dfn) + #print "%s -> %s" % (self.baseFilename, dfn) + self.stream = open(self.baseFilename, "w") + + def emit(self, record): + """ + Emit a record. + + Output the record to the file, catering for rollover as described + in doRollover(). + """ + if self.maxBytes > 0: # are we rolling over? + msg = "%s\n" % self.format(record) + self.stream.seek(0, 2) #due to non-posix-compliant Windows feature + if self.stream.tell() + len(msg) >= self.maxBytes: + self.doRollover() + logging.FileHandler.emit(self, record) + + +class SocketHandler(logging.Handler): + """ + A handler class which writes logging records, in pickle format, to + a streaming socket. 
The socket is kept open across logging calls. + If the peer resets it, an attempt is made to reconnect on the next call. + The pickle which is sent is that of the LogRecord's attribute dictionary + (__dict__), so that the receiver does not need to have the logging module + installed in order to process the logging event. + + To unpickle the record at the receiving end into a LogRecord, use the + makeLogRecord function. + """ + + def __init__(self, host, port): + """ + Initializes the handler with a specific host address and port. + + The attribute 'closeOnError' is set to 1 - which means that if + a socket error occurs, the socket is silently closed and then + reopened on the next logging call. + """ + logging.Handler.__init__(self) + self.host = host + self.port = port + self.sock = None + self.closeOnError = 0 + + def makeSocket(self): + """ + A factory method which allows subclasses to define the precise + type of socket they want. + """ + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.connect((self.host, self.port)) + return s + + def send(self, s): + """ + Send a pickled string to the socket. + + This function allows for partial sends which can happen when the + network is busy. + """ + if hasattr(self.sock, "sendall"): + self.sock.sendall(s) + else: + sentsofar = 0 + left = len(s) + while left > 0: + sent = self.sock.send(s[sentsofar:]) + sentsofar = sentsofar + sent + left = left - sent + + def makePickle(self, record): + """ + Pickles the record in binary format with a length prefix, and + returns it ready for transmission across the socket. + """ + s = cPickle.dumps(record.__dict__, 1) + #n = len(s) + #slen = "%c%c" % ((n >> 8) & 0xFF, n & 0xFF) + slen = struct.pack(">L", len(s)) + return slen + s + + def handleError(self, record): + """ + Handle an error during logging. + + An error has occurred during logging. Most likely cause - + connection lost. Close the socket so that we can retry on the + next event. + """ + if self.closeOnError and self.sock: + self.sock.close() + self.sock = None #try to reconnect next time + else: + logging.Handler.handleError(self, record) + + def emit(self, record): + """ + Emit a record. + + Pickles the record and writes it to the socket in binary format. + If there is an error with the socket, silently drop the packet. + If there was a problem with the socket, re-establishes the + socket. + """ + try: + s = self.makePickle(record) + if not self.sock: + self.sock = self.makeSocket() + self.send(s) + except: + self.handleError(record) + + def close(self): + """ + Closes the socket. + """ + if self.sock: + self.sock.close() + self.sock = None + +class DatagramHandler(SocketHandler): + """ + A handler class which writes logging records, in pickle format, to + a datagram socket. The pickle which is sent is that of the LogRecord's + attribute dictionary (__dict__), so that the receiver does not need to + have the logging module installed in order to process the logging event. + + To unpickle the record at the receiving end into a LogRecord, use the + makeLogRecord function. + + """ + def __init__(self, host, port): + """ + Initializes the handler with a specific host address and port. + """ + SocketHandler.__init__(self, host, port) + self.closeOnError = 0 + + def makeSocket(self): + """ + The factory method of SocketHandler is here overridden to create + a UDP socket (SOCK_DGRAM). + """ + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + return s + + def send(self, s): + """ + Send a pickled string to a socket. 
+ + This function no longer allows for partial sends which can happen + when the network is busy - UDP does not guarantee delivery and + can deliver packets out of sequence. + """ + self.sock.sendto(s, (self.host, self.port)) + +class SysLogHandler(logging.Handler): + """ + A handler class which sends formatted logging records to a syslog + server. Based on Sam Rushing's syslog module: + http://www.nightmare.com/squirl/python-ext/misc/syslog.py + Contributed by Nicolas Untz (after which minor refactoring changes + have been made). + """ + + # from : + # ====================================================================== + # priorities/facilities are encoded into a single 32-bit quantity, where + # the bottom 3 bits are the priority (0-7) and the top 28 bits are the + # facility (0-big number). Both the priorities and the facilities map + # roughly one-to-one to strings in the syslogd(8) source code. This + # mapping is included in this file. + # + # priorities (these are ordered) + + LOG_EMERG = 0 # system is unusable + LOG_ALERT = 1 # action must be taken immediately + LOG_CRIT = 2 # critical conditions + LOG_ERR = 3 # error conditions + LOG_WARNING = 4 # warning conditions + LOG_NOTICE = 5 # normal but significant condition + LOG_INFO = 6 # informational + LOG_DEBUG = 7 # debug-level messages + + # facility codes + LOG_KERN = 0 # kernel messages + LOG_USER = 1 # random user-level messages + LOG_MAIL = 2 # mail system + LOG_DAEMON = 3 # system daemons + LOG_AUTH = 4 # security/authorization messages + LOG_SYSLOG = 5 # messages generated internally by syslogd + LOG_LPR = 6 # line printer subsystem + LOG_NEWS = 7 # network news subsystem + LOG_UUCP = 8 # UUCP subsystem + LOG_CRON = 9 # clock daemon + LOG_AUTHPRIV = 10 # security/authorization messages (private) + + # other codes through 15 reserved for system use + LOG_LOCAL0 = 16 # reserved for local use + LOG_LOCAL1 = 17 # reserved for local use + LOG_LOCAL2 = 18 # reserved for local use + LOG_LOCAL3 = 19 # reserved for local use + LOG_LOCAL4 = 20 # reserved for local use + LOG_LOCAL5 = 21 # reserved for local use + LOG_LOCAL6 = 22 # reserved for local use + LOG_LOCAL7 = 23 # reserved for local use + + priority_names = { + "alert": LOG_ALERT, + "crit": LOG_CRIT, + "critical": LOG_CRIT, + "debug": LOG_DEBUG, + "emerg": LOG_EMERG, + "err": LOG_ERR, + "error": LOG_ERR, # DEPRECATED + "info": LOG_INFO, + "notice": LOG_NOTICE, + "panic": LOG_EMERG, # DEPRECATED + "warn": LOG_WARNING, # DEPRECATED + "warning": LOG_WARNING, + } + + facility_names = { + "auth": LOG_AUTH, + "authpriv": LOG_AUTHPRIV, + "cron": LOG_CRON, + "daemon": LOG_DAEMON, + "kern": LOG_KERN, + "lpr": LOG_LPR, + "mail": LOG_MAIL, + "news": LOG_NEWS, + "security": LOG_AUTH, # DEPRECATED + "syslog": LOG_SYSLOG, + "user": LOG_USER, + "uucp": LOG_UUCP, + "local0": LOG_LOCAL0, + "local1": LOG_LOCAL1, + "local2": LOG_LOCAL2, + "local3": LOG_LOCAL3, + "local4": LOG_LOCAL4, + "local5": LOG_LOCAL5, + "local6": LOG_LOCAL6, + "local7": LOG_LOCAL7, + } + + def __init__(self, address=('localhost', SYSLOG_UDP_PORT), facility=LOG_USER): + """ + Initialize a handler. + + If address is specified as a string, UNIX socket is used. + If facility is not specified, LOG_USER is used. 
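+
+        For example (an illustrative call, assuming a local syslogd
+        listening on the default UDP port):
+
+            SysLogHandler(address=('localhost', SYSLOG_UDP_PORT),
+                          facility=SysLogHandler.LOG_DAEMON)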
+ """ + logging.Handler.__init__(self) + + self.address = address + self.facility = facility + if type(address) == types.StringType: + self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) + # syslog may require either DGRAM or STREAM sockets + try: + self.socket.connect(address) + except socket.error: + self.socket.close() + self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + self.socket.connect(address) + self.unixsocket = 1 + else: + self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + self.unixsocket = 0 + + self.formatter = None + + # curious: when talking to the unix-domain '/dev/log' socket, a + # zero-terminator seems to be required. this string is placed + # into a class variable so that it can be overridden if + # necessary. + log_format_string = '<%d>%s\000' + + def encodePriority (self, facility, priority): + """ + Encode the facility and priority. You can pass in strings or + integers - if strings are passed, the facility_names and + priority_names mapping dictionaries are used to convert them to + integers. + """ + if type(facility) == types.StringType: + facility = self.facility_names[facility] + if type(priority) == types.StringType: + priority = self.priority_names[priority] + return (facility << 3) | priority + + def close (self): + """ + Closes the socket. + """ + if self.unixsocket: + self.socket.close() + + def emit(self, record): + """ + Emit a record. + + The record is formatted, and then sent to the syslog server. If + exception information is present, it is NOT sent to the server. + """ + msg = self.format(record) + """ + We need to convert record level to lowercase, maybe this will + change in the future. + """ + msg = self.log_format_string % ( + self.encodePriority(self.facility, + string.lower(record.levelname)), + msg) + try: + if self.unixsocket: + self.socket.send(msg) + else: + self.socket.sendto(msg, self.address) + except: + self.handleError(record) + +class SMTPHandler(logging.Handler): + """ + A handler class which sends an SMTP email for each logging event. + """ + def __init__(self, mailhost, fromaddr, toaddrs, subject): + """ + Initialize the handler. + + Initialize the instance with the from and to addresses and subject + line of the email. To specify a non-standard SMTP port, use the + (host, port) tuple format for the mailhost argument. + """ + logging.Handler.__init__(self) + if type(mailhost) == types.TupleType: + host, port = mailhost + self.mailhost = host + self.mailport = port + else: + self.mailhost = mailhost + self.mailport = None + self.fromaddr = fromaddr + if type(toaddrs) == types.StringType: + toaddrs = [toaddrs] + self.toaddrs = toaddrs + self.subject = subject + + def getSubject(self, record): + """ + Determine the subject for the email. + + If you want to specify a subject line which is record-dependent, + override this method. + """ + return self.subject + + weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] + + monthname = [None, + 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', + 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] + + def date_time(self): + """Return the current date and time formatted for a MIME header.""" + year, month, day, hh, mm, ss, wd, y, z = time.gmtime(time.time()) + s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % ( + self.weekdayname[wd], + day, self.monthname[month], year, + hh, mm, ss) + return s + + def emit(self, record): + """ + Emit a record. + + Format the record and send it to the specified addressees. 
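+
+        For illustration (hypothetical addresses): a handler created as
+        SMTPHandler(('mail.example.com', 25), 'app@example.com',
+        ['admin@example.com'], 'Application error') will produce one such
+        message for every record passed to it.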
+ """ + try: + import smtplib + port = self.mailport + if not port: + port = smtplib.SMTP_PORT + smtp = smtplib.SMTP(self.mailhost, port) + msg = self.format(record) + msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % ( + self.fromaddr, + string.join(self.toaddrs, ","), + self.getSubject(record), + self.date_time(), msg) + smtp.sendmail(self.fromaddr, self.toaddrs, msg) + smtp.quit() + except: + self.handleError(record) + +class NTEventLogHandler(logging.Handler): + """ + A handler class which sends events to the NT Event Log. Adds a + registry entry for the specified application name. If no dllname is + provided, win32service.pyd (which contains some basic message + placeholders) is used. Note that use of these placeholders will make + your event logs big, as the entire message source is held in the log. + If you want slimmer logs, you have to pass in the name of your own DLL + which contains the message definitions you want to use in the event log. + """ + def __init__(self, appname, dllname=None, logtype="Application"): + logging.Handler.__init__(self) + try: + import win32evtlogutil, win32evtlog + self.appname = appname + self._welu = win32evtlogutil + if not dllname: + dllname = os.path.split(self._welu.__file__) + dllname = os.path.split(dllname[0]) + dllname = os.path.join(dllname[0], r'win32service.pyd') + self.dllname = dllname + self.logtype = logtype + self._welu.AddSourceToRegistry(appname, dllname, logtype) + self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE + self.typemap = { + logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE, + logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE, + logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE, + logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE, + logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE, + } + except ImportError: + print "The Python Win32 extensions for NT (service, event "\ + "logging) appear not to be available." + self._welu = None + + def getMessageID(self, record): + """ + Return the message ID for the event record. If you are using your + own messages, you could do this by having the msg passed to the + logger being an ID rather than a formatting string. Then, in here, + you could use a dictionary lookup to get the message ID. This + version returns 1, which is the base message ID in win32service.pyd. + """ + return 1 + + def getEventCategory(self, record): + """ + Return the event category for the record. + + Override this if you want to specify your own categories. This version + returns 0. + """ + return 0 + + def getEventType(self, record): + """ + Return the event type for the record. + + Override this if you want to specify your own types. This version does + a mapping using the handler's typemap attribute, which is set up in + __init__() to a dictionary which contains mappings for DEBUG, INFO, + WARNING, ERROR and CRITICAL. If you are using your own levels you will + either need to override this method or place a suitable dictionary in + the handler's typemap attribute. + """ + return self.typemap.get(record.levelno, self.deftype) + + def emit(self, record): + """ + Emit a record. + + Determine the message ID, event category and event type. Then + log the message in the NT event log. + """ + if self._welu: + try: + id = self.getMessageID(record) + cat = self.getEventCategory(record) + type = self.getEventType(record) + msg = self.format(record) + self._welu.ReportEvent(self.appname, id, cat, type, [msg]) + except: + self.handleError(record) + + def close(self): + """ + Clean up this handler. 
+ + You can remove the application name from the registry as a + source of event log entries. However, if you do this, you will + not be able to see the events as you intended in the Event Log + Viewer - it needs to be able to access the registry to get the + DLL name. + """ + #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype) + pass + +class HTTPHandler(logging.Handler): + """ + A class which sends records to a Web server, using either GET or + POST semantics. + """ + def __init__(self, host, url, method="GET"): + """ + Initialize the instance with the host, the request URL, and the method + ("GET" or "POST") + """ + logging.Handler.__init__(self) + method = string.upper(method) + if method not in ["GET", "POST"]: + raise ValueError, "method must be GET or POST" + self.host = host + self.url = url + self.method = method + + def mapLogRecord(self, record): + """ + Default implementation of mapping the log record into a dict + that is send as the CGI data. Overwrite in your class. + Contributed by Franz Glasner. + """ + return record.__dict__ + + def emit(self, record): + """ + Emit a record. + + Send the record to the Web server as an URL-encoded dictionary + """ + try: + import httplib, urllib + h = httplib.HTTP(self.host) + url = self.url + data = urllib.urlencode(self.mapLogRecord(record)) + if self.method == "GET": + if (string.find(url, '?') >= 0): + sep = '&' + else: + sep = '?' + url = url + "%c%s" % (sep, data) + h.putrequest(self.method, url) + if self.method == "POST": + h.putheader("Content-length", str(len(data))) + h.endheaders() + if self.method == "POST": + h.send(data) + h.getreply() #can't do anything with the result + except: + self.handleError(record) + +class BufferingHandler(logging.Handler): + """ + A handler class which buffers logging records in memory. Whenever each + record is added to the buffer, a check is made to see if the buffer should + be flushed. If it should, then flush() is expected to do what's needed. + """ + def __init__(self, capacity): + """ + Initialize the handler with the buffer size. + """ + logging.Handler.__init__(self) + self.capacity = capacity + self.buffer = [] + + def shouldFlush(self, record): + """ + Should the handler flush its buffer? + + Returns true if the buffer is up to capacity. This method can be + overridden to implement custom flushing strategies. + """ + return (len(self.buffer) >= self.capacity) + + def emit(self, record): + """ + Emit a record. + + Append the record. If shouldFlush() tells us to, call flush() to process + the buffer. + """ + self.buffer.append(record) + if self.shouldFlush(record): + self.flush() + + def flush(self): + """ + Override to implement custom flushing behaviour. + + This version just zaps the buffer to empty. + """ + self.buffer = [] + +class MemoryHandler(BufferingHandler): + """ + A handler class which buffers logging records in memory, periodically + flushing them to a target handler. Flushing occurs whenever the buffer + is full, or when an event of a certain severity or greater is seen. + """ + def __init__(self, capacity, flushLevel=logging.ERROR, target=None): + """ + Initialize the handler with the buffer size, the level at which + flushing should occur and an optional target. + + Note that without a target being set either here or via setTarget(), + a MemoryHandler is no use to anyone! 
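+
+        For example (illustrative): MemoryHandler(100,
+        flushLevel=logging.ERROR, target=logging.StreamHandler())
+        buffers up to 100 records and flushes them all to the stream
+        as soon as the buffer fills or an ERROR record arrives.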
+ """ + BufferingHandler.__init__(self, capacity) + self.flushLevel = flushLevel + self.target = target + + def shouldFlush(self, record): + """ + Check for buffer full or a record at the flushLevel or higher. + """ + return (len(self.buffer) >= self.capacity) or \ + (record.levelno >= self.flushLevel) + + def setTarget(self, target): + """ + Set the target handler for this handler. + """ + self.target = target + + def flush(self): + """ + For a MemoryHandler, flushing means just sending the buffered + records to the target, if there is one. Override if you want + different behaviour. + """ + if self.target: + for record in self.buffer: + self.target.handle(record) + self.buffer = [] + + def close(self): + """ + Flush, set the target to None and lose the buffer. + """ + self.flush() + self.target = None + self.buffer = [] diff --git a/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet/feedparser.py b/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet/feedparser.py new file mode 100755 index 0000000..615ee7e --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet/feedparser.py @@ -0,0 +1,2931 @@ +#!/usr/bin/env python +"""Universal feed parser + +Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds + +Visit http://feedparser.org/ for the latest version +Visit http://feedparser.org/docs/ for the latest documentation + +Required: Python 2.1 or later +Recommended: Python 2.3 or later +Recommended: CJKCodecs and iconv_codec +""" + +__version__ = "4.1"# + "$Revision: 1.92 $"[11:15] + "-cvs" +__license__ = """Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS' +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE.""" +__author__ = "Mark Pilgrim " +__contributors__ = ["Jason Diamond ", + "John Beimler ", + "Fazal Majid ", + "Aaron Swartz ", + "Kevin Marks "] +_debug = 0 + +# HTTP "User-Agent" header to send to servers when downloading feeds. +# If you are embedding feedparser in a larger application, you should +# change this to your application name and URL. +USER_AGENT = "UniversalFeedParser/%s +http://feedparser.org/" % __version__ + +# HTTP "Accept" header to send to servers when downloading feeds. If you don't +# want to send an Accept header, set this to None. 
+ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1" + +# List of preferred XML parsers, by SAX driver name. These will be tried first, +# but if they're not installed, Python will keep searching through its own list +# of pre-installed parsers until it finds one that supports everything we need. +PREFERRED_XML_PARSERS = ["drv_libxml2"] + +# If you want feedparser to automatically run HTML markup through HTML Tidy, set +# this to 1. Requires mxTidy +# or utidylib . +TIDY_MARKUP = 0 + +# List of Python interfaces for HTML Tidy, in order of preference. Only useful +# if TIDY_MARKUP = 1 +PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"] + +# ---------- required modules (should come with any Python distribution) ---------- +import sgmllib, re, sys, copy, urlparse, time, rfc822, types, cgi, urllib, urllib2 +try: + from cStringIO import StringIO as _StringIO +except: + from StringIO import StringIO as _StringIO + +# ---------- optional modules (feedparser will work without these, but with reduced functionality) ---------- + +# gzip is included with most Python distributions, but may not be available if you compiled your own +try: + import gzip +except: + gzip = None +try: + import zlib +except: + zlib = None + +# If a real XML parser is available, feedparser will attempt to use it. feedparser has +# been tested with the built-in SAX parser, PyXML, and libxml2. On platforms where the +# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some +# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing. +try: + import xml.sax + xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers + from xml.sax.saxutils import escape as _xmlescape + _XML_AVAILABLE = 1 +except: + _XML_AVAILABLE = 0 + def _xmlescape(data,entities={}): + data = data.replace('&', '&') + data = data.replace('>', '>') + data = data.replace('<', '<') + for char, entity in entities: + data = data.replace(char, entity) + return data + +# base64 support for Atom feeds that contain embedded binary data +try: + import base64, binascii +except: + base64 = binascii = None + +# cjkcodecs and iconv_codec provide support for more character encodings. 
+# Both are available from http://cjkpython.i18n.org/ +try: + import cjkcodecs.aliases +except: + pass +try: + import iconv_codec +except: + pass + +# chardet library auto-detects character encodings +# Download from http://chardet.feedparser.org/ +try: + import chardet + if _debug: + import chardet.constants + chardet.constants._debug = 1 +except: + chardet = None + +# ---------- don't touch these ---------- +class ThingsNobodyCaresAboutButMe(Exception): pass +class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass +class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass +class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass +class UndeclaredNamespace(Exception): pass + +sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*') +sgmllib.special = re.compile('' % (tag, self.strattrs(attrs)), escape=0) + + # match namespaces + if tag.find(':') <> -1: + prefix, suffix = tag.split(':', 1) + else: + prefix, suffix = '', tag + prefix = self.namespacemap.get(prefix, prefix) + if prefix: + prefix = prefix + '_' + + # special hack for better tracking of empty textinput/image elements in illformed feeds + if (not prefix) and tag not in ('title', 'link', 'description', 'name'): + self.intextinput = 0 + if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'): + self.inimage = 0 + + # call special handler (if defined) or default handler + methodname = '_start_' + prefix + suffix + try: + method = getattr(self, methodname) + return method(attrsD) + except AttributeError: + return self.push(prefix + suffix, 1) + + def unknown_endtag(self, tag): + if _debug: sys.stderr.write('end %s\n' % tag) + # match namespaces + if tag.find(':') <> -1: + prefix, suffix = tag.split(':', 1) + else: + prefix, suffix = '', tag + prefix = self.namespacemap.get(prefix, prefix) + if prefix: + prefix = prefix + '_' + + # call special handler (if defined) or default handler + methodname = '_end_' + prefix + suffix + try: + method = getattr(self, methodname) + method() + except AttributeError: + self.pop(prefix + suffix) + + # track inline content + if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'): + # element declared itself as escaped markup, but it isn't really + self.contentparams['type'] = 'application/xhtml+xml' + if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml': + tag = tag.split(':')[-1] + self.handle_data('' % tag, escape=0) + + # track xml:base and xml:lang going out of scope + if self.basestack: + self.basestack.pop() + if self.basestack and self.basestack[-1]: + self.baseuri = self.basestack[-1] + if self.langstack: + self.langstack.pop() + if self.langstack: # and (self.langstack[-1] is not None): + self.lang = self.langstack[-1] + + def handle_charref(self, ref): + # called for each character reference, e.g. for ' ', ref will be '160' + if not self.elementstack: return + ref = ref.lower() + if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'): + text = '&#%s;' % ref + else: + if ref[0] == 'x': + c = int(ref[1:], 16) + else: + c = int(ref) + text = unichr(c).encode('utf-8') + self.elementstack[-1][2].append(text) + + def handle_entityref(self, ref): + # called for each entity reference, e.g. 
for '©', ref will be 'copy' + if not self.elementstack: return + if _debug: sys.stderr.write('entering handle_entityref with %s\n' % ref) + if ref in ('lt', 'gt', 'quot', 'amp', 'apos'): + text = '&%s;' % ref + else: + # entity resolution graciously donated by Aaron Swartz + def name2cp(k): + import htmlentitydefs + if hasattr(htmlentitydefs, 'name2codepoint'): # requires Python 2.3 + return htmlentitydefs.name2codepoint[k] + k = htmlentitydefs.entitydefs[k] + if k.startswith('&#') and k.endswith(';'): + return int(k[2:-1]) # not in latin-1 + return ord(k) + try: name2cp(ref) + except KeyError: text = '&%s;' % ref + else: text = unichr(name2cp(ref)).encode('utf-8') + self.elementstack[-1][2].append(text) + + def handle_data(self, text, escape=1): + # called for each block of plain text, i.e. outside of any tag and + # not containing any character or entity references + if not self.elementstack: return + if escape and self.contentparams.get('type') == 'application/xhtml+xml': + text = _xmlescape(text) + self.elementstack[-1][2].append(text) + + def handle_comment(self, text): + # called for each comment, e.g. + pass + + def handle_pi(self, text): + # called for each processing instruction, e.g. + pass + + def handle_decl(self, text): + pass + + def parse_declaration(self, i): + # override internal declaration handler to handle CDATA blocks + if _debug: sys.stderr.write('entering parse_declaration\n') + if self.rawdata[i:i+9] == '', i) + if k == -1: k = len(self.rawdata) + self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0) + return k+3 + else: + k = self.rawdata.find('>', i) + return k+1 + + def mapContentType(self, contentType): + contentType = contentType.lower() + if contentType == 'text': + contentType = 'text/plain' + elif contentType == 'html': + contentType = 'text/html' + elif contentType == 'xhtml': + contentType = 'application/xhtml+xml' + return contentType + + def trackNamespace(self, prefix, uri): + loweruri = uri.lower() + if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/') and not self.version: + self.version = 'rss090' + if loweruri == 'http://purl.org/rss/1.0/' and not self.version: + self.version = 'rss10' + if loweruri == 'http://www.w3.org/2005/atom' and not self.version: + self.version = 'atom10' + if loweruri.find('backend.userland.com/rss') <> -1: + # match any backend.userland.com namespace + uri = 'http://backend.userland.com/rss' + loweruri = uri + if self._matchnamespaces.has_key(loweruri): + self.namespacemap[prefix] = self._matchnamespaces[loweruri] + self.namespacesInUse[self._matchnamespaces[loweruri]] = uri + else: + self.namespacesInUse[prefix or ''] = uri + + def resolveURI(self, uri): + return _urljoin(self.baseuri or '', uri) + + def decodeEntities(self, element, data): + return data + + def strattrs(self, attrs): + return ''.join([' %s="%s"' % (t[0],_xmlescape(t[1],{'"':'"'})) for t in attrs]) + + def push(self, element, expectingText): + self.elementstack.append([element, expectingText, []]) + + def pop(self, element, stripWhitespace=1): + if not self.elementstack: return + if self.elementstack[-1][0] != element: return + + element, expectingText, pieces = self.elementstack.pop() + + if self.version == 'atom10' and self.contentparams.get('type','text') == 'application/xhtml+xml': + # remove enclosing child element, but only if it is a
<div> and
+            # only if all the remaining content is nested underneath it.
+            # This means that the divs would be retained in the following:
+            #    <div>foo</div><div>bar</div>
+            if pieces and (pieces[0] == '<div>' or pieces[0].startswith('<div ')) and pieces[-1]=='</div>':
+                depth = 0
+                for piece in pieces[:-1]:
+                    if piece.startswith('</'):
+                        depth -= 1
+                        if depth == 0: break
+                    elif piece.startswith('<') and not piece.endswith('/>'):
+                        depth += 1
+                else:
+                    pieces = pieces[1:-1]
+
+        output = ''.join(pieces)
+        if stripWhitespace:
+            output = output.strip()
+        if not expectingText: return output
+
+        # decode base64 content
+        if base64 and self.contentparams.get('base64', 0):
+            try:
+                output = base64.decodestring(output)
+            except binascii.Error:
+                pass
+            except binascii.Incomplete:
+                pass
+
+        # resolve relative URIs
+        if (element in self.can_be_relative_uri) and output:
+            output = self.resolveURI(output)
+
+        # decode entities within embedded markup
+        if not self.contentparams.get('base64', 0):
+            output = self.decodeEntities(element, output)
+
+        # remove temporary cruft from contentparams
+        try:
+            del self.contentparams['mode']
+        except KeyError:
+            pass
+        try:
+            del self.contentparams['base64']
+        except KeyError:
+            pass
+
+        # resolve relative URIs within embedded markup
+        if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types:
+            if element in self.can_contain_relative_uris:
+                output = _resolveRelativeURIs(output, self.baseuri, self.encoding)
+
+        # sanitize embedded markup
+        if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types:
+            if element in self.can_contain_dangerous_markup:
+                output = _sanitizeHTML(output, self.encoding)
+
+        if self.encoding and type(output) != type(u''):
+            try:
+                output = unicode(output, self.encoding)
+            except:
+                pass
+
+        # address common error where people take data that is already
+        # utf-8, presume that it is iso-8859-1, and re-encode it.
+        if self.encoding=='utf-8' and type(output) == type(u''):
+            try:
+                output = unicode(output.encode('iso-8859-1'), 'utf-8')
+            except:
+                pass
+
+        # map win-1252 extensions to the proper code points
+        if type(output) == type(u''):
+            output = u''.join([c in cp1252 and cp1252[c] or c for c in output])
+
+        # categories/tags/keywords/whatever are handled in _end_category
+        if element == 'category':
+            return output
+
+        # store output in appropriate place(s)
+        if self.inentry and not self.insource:
+            if element == 'content':
+                self.entries[-1].setdefault(element, [])
+                contentparams = copy.deepcopy(self.contentparams)
+                contentparams['value'] = output
+                self.entries[-1][element].append(contentparams)
+            elif element == 'link':
+                self.entries[-1][element] = output
+                if output:
+                    self.entries[-1]['links'][-1]['href'] = output
+            else:
+                if element == 'description':
+                    element = 'summary'
+                self.entries[-1][element] = output
+                if self.incontent:
+                    contentparams = copy.deepcopy(self.contentparams)
+                    contentparams['value'] = output
+                    self.entries[-1][element + '_detail'] = contentparams
+        elif (self.infeed or self.insource) and (not self.intextinput) and (not self.inimage):
+            context = self._getContext()
+            if element == 'description':
+                element = 'subtitle'
+            context[element] = output
+            if element == 'link':
+                context['links'][-1]['href'] = output
+            elif self.incontent:
+                contentparams = copy.deepcopy(self.contentparams)
+                contentparams['value'] = output
+                context[element + '_detail'] = contentparams
+        return output
+
+    def pushContent(self, tag, attrsD, defaultContentType, expectingText):
+        self.incontent += 1
+        self.contentparams = FeedParserDict({
+            'type': self.mapContentType(attrsD.get('type', defaultContentType)),
+            'language': self.lang,
+            'base': self.baseuri})
+        self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
+        self.push(tag, expectingText)
+
+    def popContent(self, tag):
+        value = self.pop(tag)
+        self.incontent -= 1
+ self.contentparams.clear() + return value + + def _mapToStandardPrefix(self, name): + colonpos = name.find(':') + if colonpos <> -1: + prefix = name[:colonpos] + suffix = name[colonpos+1:] + prefix = self.namespacemap.get(prefix, prefix) + name = prefix + ':' + suffix + return name + + def _getAttribute(self, attrsD, name): + return attrsD.get(self._mapToStandardPrefix(name)) + + def _isBase64(self, attrsD, contentparams): + if attrsD.get('mode', '') == 'base64': + return 1 + if self.contentparams['type'].startswith('text/'): + return 0 + if self.contentparams['type'].endswith('+xml'): + return 0 + if self.contentparams['type'].endswith('/xml'): + return 0 + return 1 + + def _itsAnHrefDamnIt(self, attrsD): + href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None))) + if href: + try: + del attrsD['url'] + except KeyError: + pass + try: + del attrsD['uri'] + except KeyError: + pass + attrsD['href'] = href + return attrsD + + def _save(self, key, value): + context = self._getContext() + context.setdefault(key, value) + + def _start_rss(self, attrsD): + versionmap = {'0.91': 'rss091u', + '0.92': 'rss092', + '0.93': 'rss093', + '0.94': 'rss094'} + if not self.version: + attr_version = attrsD.get('version', '') + version = versionmap.get(attr_version) + if version: + self.version = version + elif attr_version.startswith('2.'): + self.version = 'rss20' + else: + self.version = 'rss' + + def _start_dlhottitles(self, attrsD): + self.version = 'hotrss' + + def _start_channel(self, attrsD): + self.infeed = 1 + self._cdf_common(attrsD) + _start_feedinfo = _start_channel + + def _cdf_common(self, attrsD): + if attrsD.has_key('lastmod'): + self._start_modified({}) + self.elementstack[-1][-1] = attrsD['lastmod'] + self._end_modified() + if attrsD.has_key('href'): + self._start_link({}) + self.elementstack[-1][-1] = attrsD['href'] + self._end_link() + + def _start_feed(self, attrsD): + self.infeed = 1 + versionmap = {'0.1': 'atom01', + '0.2': 'atom02', + '0.3': 'atom03'} + if not self.version: + attr_version = attrsD.get('version') + version = versionmap.get(attr_version) + if version: + self.version = version + else: + self.version = 'atom' + + def _end_channel(self): + self.infeed = 0 + _end_feed = _end_channel + + def _start_image(self, attrsD): + self.inimage = 1 + self.push('image', 0) + context = self._getContext() + context.setdefault('image', FeedParserDict()) + + def _end_image(self): + self.pop('image') + self.inimage = 0 + + def _start_textinput(self, attrsD): + self.intextinput = 1 + self.push('textinput', 0) + context = self._getContext() + context.setdefault('textinput', FeedParserDict()) + _start_textInput = _start_textinput + + def _end_textinput(self): + self.pop('textinput') + self.intextinput = 0 + _end_textInput = _end_textinput + + def _start_author(self, attrsD): + self.inauthor = 1 + self.push('author', 1) + _start_managingeditor = _start_author + _start_dc_author = _start_author + _start_dc_creator = _start_author + _start_itunes_author = _start_author + + def _end_author(self): + self.pop('author') + self.inauthor = 0 + self._sync_author_detail() + _end_managingeditor = _end_author + _end_dc_author = _end_author + _end_dc_creator = _end_author + _end_itunes_author = _end_author + + def _start_itunes_owner(self, attrsD): + self.inpublisher = 1 + self.push('publisher', 0) + + def _end_itunes_owner(self): + self.pop('publisher') + self.inpublisher = 0 + self._sync_author_detail('publisher') + + def _start_contributor(self, attrsD): + self.incontributor = 1 + context = 
self._getContext() + context.setdefault('contributors', []) + context['contributors'].append(FeedParserDict()) + self.push('contributor', 0) + + def _end_contributor(self): + self.pop('contributor') + self.incontributor = 0 + + def _start_dc_contributor(self, attrsD): + self.incontributor = 1 + context = self._getContext() + context.setdefault('contributors', []) + context['contributors'].append(FeedParserDict()) + self.push('name', 0) + + def _end_dc_contributor(self): + self._end_name() + self.incontributor = 0 + + def _start_name(self, attrsD): + self.push('name', 0) + _start_itunes_name = _start_name + + def _end_name(self): + value = self.pop('name') + if self.inpublisher: + self._save_author('name', value, 'publisher') + elif self.inauthor: + self._save_author('name', value) + elif self.incontributor: + self._save_contributor('name', value) + elif self.intextinput: + context = self._getContext() + context['textinput']['name'] = value + _end_itunes_name = _end_name + + def _start_width(self, attrsD): + self.push('width', 0) + + def _end_width(self): + value = self.pop('width') + try: + value = int(value) + except: + value = 0 + if self.inimage: + context = self._getContext() + context['image']['width'] = value + + def _start_height(self, attrsD): + self.push('height', 0) + + def _end_height(self): + value = self.pop('height') + try: + value = int(value) + except: + value = 0 + if self.inimage: + context = self._getContext() + context['image']['height'] = value + + def _start_url(self, attrsD): + self.push('href', 1) + _start_homepage = _start_url + _start_uri = _start_url + + def _end_url(self): + value = self.pop('href') + if self.inauthor: + self._save_author('href', value) + elif self.incontributor: + self._save_contributor('href', value) + elif self.inimage: + context = self._getContext() + context['image']['href'] = value + elif self.intextinput: + context = self._getContext() + context['textinput']['link'] = value + _end_homepage = _end_url + _end_uri = _end_url + + def _start_email(self, attrsD): + self.push('email', 0) + _start_itunes_email = _start_email + + def _end_email(self): + value = self.pop('email') + if self.inpublisher: + self._save_author('email', value, 'publisher') + elif self.inauthor: + self._save_author('email', value) + elif self.incontributor: + self._save_contributor('email', value) + _end_itunes_email = _end_email + + def _getContext(self): + if self.insource: + context = self.sourcedata + elif self.inentry: + context = self.entries[-1] + else: + context = self.feeddata + return context + + def _save_author(self, key, value, prefix='author'): + context = self._getContext() + context.setdefault(prefix + '_detail', FeedParserDict()) + context[prefix + '_detail'][key] = value + self._sync_author_detail() + + def _save_contributor(self, key, value): + context = self._getContext() + context.setdefault('contributors', [FeedParserDict()]) + context['contributors'][-1][key] = value + + def _sync_author_detail(self, key='author'): + context = self._getContext() + detail = context.get('%s_detail' % key) + if detail: + name = detail.get('name') + email = detail.get('email') + if name and email: + context[key] = '%s (%s)' % (name, email) + elif name: + context[key] = name + elif email: + context[key] = email + else: + author = context.get(key) + if not author: return + emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))''', author) + if not emailmatch: return + email = 
emailmatch.group(0) + # probably a better way to do the following, but it passes all the tests + author = author.replace(email, '') + author = author.replace('()', '') + author = author.strip() + if author and (author[0] == '('): + author = author[1:] + if author and (author[-1] == ')'): + author = author[:-1] + author = author.strip() + context.setdefault('%s_detail' % key, FeedParserDict()) + context['%s_detail' % key]['name'] = author + context['%s_detail' % key]['email'] = email + + def _start_subtitle(self, attrsD): + self.pushContent('subtitle', attrsD, 'text/plain', 1) + _start_tagline = _start_subtitle + _start_itunes_subtitle = _start_subtitle + + def _end_subtitle(self): + self.popContent('subtitle') + _end_tagline = _end_subtitle + _end_itunes_subtitle = _end_subtitle + + def _start_rights(self, attrsD): + self.pushContent('rights', attrsD, 'text/plain', 1) + _start_dc_rights = _start_rights + _start_copyright = _start_rights + + def _end_rights(self): + self.popContent('rights') + _end_dc_rights = _end_rights + _end_copyright = _end_rights + + def _start_item(self, attrsD): + self.entries.append(FeedParserDict()) + self.push('item', 0) + self.inentry = 1 + self.guidislink = 0 + id = self._getAttribute(attrsD, 'rdf:about') + if id: + context = self._getContext() + context['id'] = id + self._cdf_common(attrsD) + _start_entry = _start_item + _start_product = _start_item + + def _end_item(self): + self.pop('item') + self.inentry = 0 + _end_entry = _end_item + + def _start_dc_language(self, attrsD): + self.push('language', 1) + _start_language = _start_dc_language + + def _end_dc_language(self): + self.lang = self.pop('language') + _end_language = _end_dc_language + + def _start_dc_publisher(self, attrsD): + self.push('publisher', 1) + _start_webmaster = _start_dc_publisher + + def _end_dc_publisher(self): + self.pop('publisher') + self._sync_author_detail('publisher') + _end_webmaster = _end_dc_publisher + + def _start_published(self, attrsD): + self.push('published', 1) + _start_dcterms_issued = _start_published + _start_issued = _start_published + + def _end_published(self): + value = self.pop('published') + self._save('published_parsed', _parse_date(value)) + _end_dcterms_issued = _end_published + _end_issued = _end_published + + def _start_updated(self, attrsD): + self.push('updated', 1) + _start_modified = _start_updated + _start_dcterms_modified = _start_updated + _start_pubdate = _start_updated + _start_dc_date = _start_updated + + def _end_updated(self): + value = self.pop('updated') + parsed_value = _parse_date(value) + self._save('updated_parsed', parsed_value) + _end_modified = _end_updated + _end_dcterms_modified = _end_updated + _end_pubdate = _end_updated + _end_dc_date = _end_updated + + def _start_created(self, attrsD): + self.push('created', 1) + _start_dcterms_created = _start_created + + def _end_created(self): + value = self.pop('created') + self._save('created_parsed', _parse_date(value)) + _end_dcterms_created = _end_created + + def _start_expirationdate(self, attrsD): + self.push('expired', 1) + + def _end_expirationdate(self): + self._save('expired_parsed', _parse_date(self.pop('expired'))) + + def _start_cc_license(self, attrsD): + self.push('license', 1) + value = self._getAttribute(attrsD, 'rdf:resource') + if value: + self.elementstack[-1][2].append(value) + self.pop('license') + + def _start_creativecommons_license(self, attrsD): + self.push('license', 1) + + def _end_creativecommons_license(self): + self.pop('license') + + def _addTag(self, term, 
scheme, label): + context = self._getContext() + tags = context.setdefault('tags', []) + if (not term) and (not scheme) and (not label): return + value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label}) + if value not in tags: + tags.append(FeedParserDict({'term': term, 'scheme': scheme, 'label': label})) + + def _start_category(self, attrsD): + if _debug: sys.stderr.write('entering _start_category with %s\n' % repr(attrsD)) + term = attrsD.get('term') + scheme = attrsD.get('scheme', attrsD.get('domain')) + label = attrsD.get('label') + self._addTag(term, scheme, label) + self.push('category', 1) + _start_dc_subject = _start_category + _start_keywords = _start_category + + def _end_itunes_keywords(self): + for term in self.pop('itunes_keywords').split(): + self._addTag(term, 'http://www.itunes.com/', None) + + def _start_itunes_category(self, attrsD): + self._addTag(attrsD.get('text'), 'http://www.itunes.com/', None) + self.push('category', 1) + + def _end_category(self): + value = self.pop('category') + if not value: return + context = self._getContext() + tags = context['tags'] + if value and len(tags) and not tags[-1]['term']: + tags[-1]['term'] = value + else: + self._addTag(value, None, None) + _end_dc_subject = _end_category + _end_keywords = _end_category + _end_itunes_category = _end_category + + def _start_cloud(self, attrsD): + self._getContext()['cloud'] = FeedParserDict(attrsD) + + def _start_link(self, attrsD): + attrsD.setdefault('rel', 'alternate') + attrsD.setdefault('type', 'text/html') + attrsD = self._itsAnHrefDamnIt(attrsD) + if attrsD.has_key('href'): + attrsD['href'] = self.resolveURI(attrsD['href']) + expectingText = self.infeed or self.inentry or self.insource + context = self._getContext() + context.setdefault('links', []) + context['links'].append(FeedParserDict(attrsD)) + if attrsD['rel'] == 'enclosure': + self._start_enclosure(attrsD) + if attrsD.has_key('href'): + expectingText = 0 + if (attrsD.get('rel') == 'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types): + context['link'] = attrsD['href'] + else: + self.push('link', expectingText) + _start_producturl = _start_link + + def _end_link(self): + value = self.pop('link') + context = self._getContext() + if self.intextinput: + context['textinput']['link'] = value + if self.inimage: + context['image']['link'] = value + _end_producturl = _end_link + + def _start_guid(self, attrsD): + self.guidislink = (attrsD.get('ispermalink', 'true') == 'true') + self.push('id', 1) + + def _end_guid(self): + value = self.pop('id') + self._save('guidislink', self.guidislink and not self._getContext().has_key('link')) + if self.guidislink: + # guid acts as link, but only if 'ispermalink' is not present or is 'true', + # and only if the item doesn't already have a link element + self._save('link', value) + + def _start_title(self, attrsD): + self.pushContent('title', attrsD, 'text/plain', self.infeed or self.inentry or self.insource) + _start_dc_title = _start_title + _start_media_title = _start_title + + def _end_title(self): + value = self.popContent('title') + context = self._getContext() + if self.intextinput: + context['textinput']['title'] = value + elif self.inimage: + context['image']['title'] = value + _end_dc_title = _end_title + _end_media_title = _end_title + + def _start_description(self, attrsD): + context = self._getContext() + if context.has_key('summary'): + self._summaryKey = 'content' + self._start_content(attrsD) + else: + self.pushContent('description', attrsD, 
'text/html', self.infeed or self.inentry or self.insource) + + def _start_abstract(self, attrsD): + self.pushContent('description', attrsD, 'text/plain', self.infeed or self.inentry or self.insource) + + def _end_description(self): + if self._summaryKey == 'content': + self._end_content() + else: + value = self.popContent('description') + context = self._getContext() + if self.intextinput: + context['textinput']['description'] = value + elif self.inimage: + context['image']['description'] = value + self._summaryKey = None + _end_abstract = _end_description + + def _start_info(self, attrsD): + self.pushContent('info', attrsD, 'text/plain', 1) + _start_feedburner_browserfriendly = _start_info + + def _end_info(self): + self.popContent('info') + _end_feedburner_browserfriendly = _end_info + + def _start_generator(self, attrsD): + if attrsD: + attrsD = self._itsAnHrefDamnIt(attrsD) + if attrsD.has_key('href'): + attrsD['href'] = self.resolveURI(attrsD['href']) + self._getContext()['generator_detail'] = FeedParserDict(attrsD) + self.push('generator', 1) + + def _end_generator(self): + value = self.pop('generator') + context = self._getContext() + if context.has_key('generator_detail'): + context['generator_detail']['name'] = value + + def _start_admin_generatoragent(self, attrsD): + self.push('generator', 1) + value = self._getAttribute(attrsD, 'rdf:resource') + if value: + self.elementstack[-1][2].append(value) + self.pop('generator') + self._getContext()['generator_detail'] = FeedParserDict({'href': value}) + + def _start_admin_errorreportsto(self, attrsD): + self.push('errorreportsto', 1) + value = self._getAttribute(attrsD, 'rdf:resource') + if value: + self.elementstack[-1][2].append(value) + self.pop('errorreportsto') + + def _start_summary(self, attrsD): + context = self._getContext() + if context.has_key('summary'): + self._summaryKey = 'content' + self._start_content(attrsD) + else: + self._summaryKey = 'summary' + self.pushContent(self._summaryKey, attrsD, 'text/plain', 1) + _start_itunes_summary = _start_summary + + def _end_summary(self): + if self._summaryKey == 'content': + self._end_content() + else: + self.popContent(self._summaryKey or 'summary') + self._summaryKey = None + _end_itunes_summary = _end_summary + + def _start_enclosure(self, attrsD): + attrsD = self._itsAnHrefDamnIt(attrsD) + self._getContext().setdefault('enclosures', []).append(FeedParserDict(attrsD)) + href = attrsD.get('href') + if href: + context = self._getContext() + if not context.get('id'): + context['id'] = href + + def _start_source(self, attrsD): + self.insource = 1 + + def _end_source(self): + self.insource = 0 + self._getContext()['source'] = copy.deepcopy(self.sourcedata) + self.sourcedata.clear() + + def _start_content(self, attrsD): + self.pushContent('content', attrsD, 'text/plain', 1) + src = attrsD.get('src') + if src: + self.contentparams['src'] = src + self.push('content', 1) + + def _start_prodlink(self, attrsD): + self.pushContent('content', attrsD, 'text/html', 1) + + def _start_body(self, attrsD): + self.pushContent('content', attrsD, 'application/xhtml+xml', 1) + _start_xhtml_body = _start_body + + def _start_content_encoded(self, attrsD): + self.pushContent('content', attrsD, 'text/html', 1) + _start_fullitem = _start_content_encoded + + def _end_content(self): + copyToDescription = self.mapContentType(self.contentparams.get('type')) in (['text/plain'] + self.html_types) + value = self.popContent('content') + if copyToDescription: + self._save('description', value) + _end_body = 
_end_content + _end_xhtml_body = _end_content + _end_content_encoded = _end_content + _end_fullitem = _end_content + _end_prodlink = _end_content + + def _start_itunes_image(self, attrsD): + self.push('itunes_image', 0) + self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')}) + _start_itunes_link = _start_itunes_image + + def _end_itunes_block(self): + value = self.pop('itunes_block', 0) + self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0 + + def _end_itunes_explicit(self): + value = self.pop('itunes_explicit', 0) + self._getContext()['itunes_explicit'] = (value == 'yes') and 1 or 0 + +if _XML_AVAILABLE: + class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler): + def __init__(self, baseuri, baselang, encoding): + if _debug: sys.stderr.write('trying StrictFeedParser\n') + xml.sax.handler.ContentHandler.__init__(self) + _FeedParserMixin.__init__(self, baseuri, baselang, encoding) + self.bozo = 0 + self.exc = None + + def startPrefixMapping(self, prefix, uri): + self.trackNamespace(prefix, uri) + + def startElementNS(self, name, qname, attrs): + namespace, localname = name + lowernamespace = str(namespace or '').lower() + if lowernamespace.find('backend.userland.com/rss') <> -1: + # match any backend.userland.com namespace + namespace = 'http://backend.userland.com/rss' + lowernamespace = namespace + if qname and qname.find(':') > 0: + givenprefix = qname.split(':')[0] + else: + givenprefix = None + prefix = self._matchnamespaces.get(lowernamespace, givenprefix) + if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and not self.namespacesInUse.has_key(givenprefix): + raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix + if prefix: + localname = prefix + ':' + localname + localname = str(localname).lower() + if _debug: sys.stderr.write('startElementNS: qname = %s, namespace = %s, givenprefix = %s, prefix = %s, attrs = %s, localname = %s\n' % (qname, namespace, givenprefix, prefix, attrs.items(), localname)) + + # qname implementation is horribly broken in Python 2.1 (it + # doesn't report any), and slightly broken in Python 2.2 (it + # doesn't report the xml: namespace). So we match up namespaces + # with a known list first, and then possibly override them with + # the qnames the SAX parser gives us (if indeed it gives us any + # at all). Thanks to MatejC for helping me test this and + # tirelessly telling me that it didn't work yet. 
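+            # Build a plain dict of attributes, folding any matched namespace
+            # prefix into the attribute name itself.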
+            attrsD = {}
+            for (namespace, attrlocalname), attrvalue in attrs._attrs.items():
+                lowernamespace = (namespace or '').lower()
+                prefix = self._matchnamespaces.get(lowernamespace, '')
+                if prefix:
+                    attrlocalname = prefix + ':' + attrlocalname
+                attrsD[str(attrlocalname).lower()] = attrvalue
+            for qname in attrs.getQNames():
+                attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
+            self.unknown_starttag(localname, attrsD.items())
+
+        def characters(self, text):
+            self.handle_data(text)
+
+        def endElementNS(self, name, qname):
+            namespace, localname = name
+            lowernamespace = str(namespace or '').lower()
+            if qname and qname.find(':') > 0:
+                givenprefix = qname.split(':')[0]
+            else:
+                givenprefix = ''
+            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
+            if prefix:
+                localname = prefix + ':' + localname
+            localname = str(localname).lower()
+            self.unknown_endtag(localname)
+
+        def error(self, exc):
+            self.bozo = 1
+            self.exc = exc
+
+        def fatalError(self, exc):
+            self.error(exc)
+            raise exc
+
+class _BaseHTMLProcessor(sgmllib.SGMLParser):
+    elements_no_end_tag = ['area', 'base', 'basefont', 'br', 'col', 'frame', 'hr',
+      'img', 'input', 'isindex', 'link', 'meta', 'param']
+
+    def __init__(self, encoding):
+        self.encoding = encoding
+        if _debug: sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding)
+        sgmllib.SGMLParser.__init__(self)
+
+    def reset(self):
+        self.pieces = []
+        sgmllib.SGMLParser.reset(self)
+
+    def _shorttag_replace(self, match):
+        tag = match.group(1)
+        if tag in self.elements_no_end_tag:
+            return '<' + tag + ' />'
+        else:
+            return '<' + tag + '></' + tag + '>'
+
+    def feed(self, data):
+        data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
+        #data = re.sub(r'<(\S+?)\s*?/>', self._shorttag_replace, data) # bug [ 1399464 ] Bad regexp for _shorttag_replace
+        data = re.sub(r'<([^<\s]+?)\s*/>', self._shorttag_replace, data)
+        data = data.replace('&#39;', "'")
+        data = data.replace('&#34;', '"')
+        if self.encoding and type(data) == type(u''):
+            data = data.encode(self.encoding)
+        sgmllib.SGMLParser.feed(self, data)
+        sgmllib.SGMLParser.close(self)
+
+    def normalize_attrs(self, attrs):
+        # utility method to be called by descendants
+        attrs = [(k.lower(), v) for k, v in attrs]
+        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
+        return attrs
+
+    def unknown_starttag(self, tag, attrs):
+        # called for each start tag
+        # attrs is a list of (attr, value) tuples
+        # e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
+        if _debug: sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag)
+        uattrs = []
+        # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
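+        # (coercing both keys and values to unicode up front keeps the join
+        # below from failing on mixed str/unicode attribute data)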
+        for key, value in attrs:
+            if type(value) != type(u''):
+                value = unicode(value, self.encoding)
+            uattrs.append((unicode(key, self.encoding), value))
+        strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs]).encode(self.encoding)
+        if tag in self.elements_no_end_tag:
+            self.pieces.append('<%(tag)s%(strattrs)s />' % locals())
+        else:
+            self.pieces.append('<%(tag)s%(strattrs)s>' % locals())
+
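+    # Illustrative sketch (not from the original module): feeding the
+    # fragment '<img src="a.png">' to _BaseHTMLProcessor('utf-8') and then
+    # calling output() yields '<img src="a.png" />', because 'img' is in
+    # elements_no_end_tag and so is re-emitted in self-closing form.
+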
+    def unknown_endtag(self, tag):
+        # called for each end tag, e.g. for </pre>, tag will be 'pre'
+        # Reconstruct the original end tag.
+        if tag not in self.elements_no_end_tag:
+            self.pieces.append("</%(tag)s>" % locals())
+
+    def handle_charref(self, ref):
+        # called for each character reference, e.g. for '&#160;', ref will be '160'
+        # Reconstruct the original character reference.
+        self.pieces.append('&#%(ref)s;' % locals())
+
+    def handle_entityref(self, ref):
+        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
+        # Reconstruct the original entity reference.
+        import htmlentitydefs
+        if not hasattr(htmlentitydefs, 'name2codepoint') or htmlentitydefs.name2codepoint.has_key(ref):
+            self.pieces.append('&%(ref)s;' % locals())
+        else:
+            self.pieces.append('&amp;%(ref)s' % locals())
+
+    def handle_data(self, text):
+        # called for each block of plain text, i.e. outside of any tag and
+        # not containing any character or entity references
+        # Store the original text verbatim.
+        if _debug: sys.stderr.write('_BaseHTMLProcessor, handle_text, text=%s\n' % text)
+        self.pieces.append(text)
+
+    def handle_comment(self, text):
+        # called for each HTML comment, e.g. <!-- insert Python code here -->
+        # Reconstruct the original comment.
+        self.pieces.append('<!--%(text)s-->' % locals())
+
+    def handle_pi(self, text):
+        # called for each processing instruction, e.g. <?instruction>
+        # Reconstruct original processing instruction.
+        self.pieces.append('<?%(text)s>' % locals())
+
+    def handle_decl(self, text):
+        # called for the DOCTYPE, if present, e.g.
+        # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
+        #     "http://www.w3.org/TR/html4/loose.dtd">
+        # Reconstruct original DOCTYPE
+        self.pieces.append('<!%(text)s>' % locals())
+
+    _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
+    def _scan_name(self, i, declstartpos):
+        rawdata = self.rawdata
+        n = len(rawdata)
+        if i == n:
+            return None, -1
+        m = self._new_declname_match(rawdata, i)
+        if m:
+            s = m.group()
+            name = s.strip()
+            if (i + len(s)) == n:
+                return None, -1 # end of buffer
+            return name.lower(), m.end()
+        else:
+            self.handle_data(rawdata)
+#            self.updatepos(declstartpos, i)
+            return None, -1
+
+    def output(self):
+        '''Return processed HTML as a single string'''
+        return ''.join([str(p) for p in self.pieces])
+
+class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
+    def __init__(self, baseuri, baselang, encoding):
+        sgmllib.SGMLParser.__init__(self)
+        _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
+
+    def decodeEntities(self, element, data):
+        data = data.replace('&#60;', '&lt;')
+        data = data.replace('&#x3c;', '&lt;')
+        data = data.replace('&#62;', '&gt;')
+        data = data.replace('&#x3e;', '&gt;')
+        data = data.replace('&#38;', '&amp;')
+        data = data.replace('&#x26;', '&amp;')
+        data = data.replace('&#34;', '&quot;')
+        data = data.replace('&#x22;', '&quot;')
+        data = data.replace('&#39;', '&apos;')
+        data = data.replace('&#x27;', '&apos;')
+        if self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
+            data = data.replace('&lt;', '<')
+            data = data.replace('&gt;', '>')
+            data = data.replace('&amp;', '&')
+            data = data.replace('&quot;', '"')
+            data = data.replace('&apos;', "'")
+        return data
+
+    def strattrs(self, attrs):
+        return ''.join([' %s="%s"' % t for t in attrs])
+
+class _RelativeURIResolver(_BaseHTMLProcessor):
+    relative_uris = [('a', 'href'),
+                     ('applet', 'codebase'),
+                     ('area', 'href'),
+                     ('blockquote', 'cite'),
+                     ('body', 'background'),
+                     ('del', 'cite'),
+                     ('form', 'action'),
+                     ('frame', 'longdesc'),
+                     ('frame', 'src'),
+                     ('iframe', 'longdesc'),
+                     ('iframe', 'src'),
+                     ('head', 'profile'),
+                     ('img', 'longdesc'),
+                     ('img', 'src'),
+                     ('img', 'usemap'),
+                     ('input', 'src'),
+                     ('input', 'usemap'),
+                     ('ins', 'cite'),
+                     ('link', 'href'),
+                     ('object',
'classid'), + ('object', 'codebase'), + ('object', 'data'), + ('object', 'usemap'), + ('q', 'cite'), + ('script', 'src')] + + def __init__(self, baseuri, encoding): + _BaseHTMLProcessor.__init__(self, encoding) + self.baseuri = baseuri + + def resolveURI(self, uri): + return _urljoin(self.baseuri, uri) + + def unknown_starttag(self, tag, attrs): + attrs = self.normalize_attrs(attrs) + attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs] + _BaseHTMLProcessor.unknown_starttag(self, tag, attrs) + +def _resolveRelativeURIs(htmlSource, baseURI, encoding): + if _debug: sys.stderr.write('entering _resolveRelativeURIs\n') + p = _RelativeURIResolver(baseURI, encoding) + p.feed(htmlSource) + return p.output() + +class _HTMLSanitizer(_BaseHTMLProcessor): + acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'b', 'big', + 'blockquote', 'br', 'button', 'caption', 'center', 'cite', 'code', 'col', + 'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt', 'em', 'fieldset', + 'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', + 'ins', 'kbd', 'label', 'legend', 'li', 'map', 'menu', 'ol', 'optgroup', + 'option', 'p', 'pre', 'q', 's', 'samp', 'select', 'small', 'span', 'strike', + 'strong', 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'tfoot', 'th', + 'thead', 'tr', 'tt', 'u', 'ul', 'var'] + + acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey', + 'action', 'align', 'alt', 'axis', 'border', 'cellpadding', 'cellspacing', + 'char', 'charoff', 'charset', 'checked', 'cite', 'class', 'clear', 'cols', + 'colspan', 'color', 'compact', 'coords', 'datetime', 'dir', 'disabled', + 'enctype', 'for', 'frame', 'headers', 'height', 'href', 'hreflang', 'hspace', + 'id', 'ismap', 'label', 'lang', 'longdesc', 'maxlength', 'media', 'method', + 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'prompt', 'readonly', + 'rel', 'rev', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', + 'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title', 'type', + 'usemap', 'valign', 'value', 'vspace', 'width', 'xml:lang'] + + unacceptable_elements_with_end_tag = ['script', 'applet'] + + def reset(self): + _BaseHTMLProcessor.reset(self) + self.unacceptablestack = 0 + + def unknown_starttag(self, tag, attrs): + if not tag in self.acceptable_elements: + if tag in self.unacceptable_elements_with_end_tag: + self.unacceptablestack += 1 + return + attrs = self.normalize_attrs(attrs) + attrs = [(key, value) for key, value in attrs if key in self.acceptable_attributes] + _BaseHTMLProcessor.unknown_starttag(self, tag, attrs) + + def unknown_endtag(self, tag): + if not tag in self.acceptable_elements: + if tag in self.unacceptable_elements_with_end_tag: + self.unacceptablestack -= 1 + return + _BaseHTMLProcessor.unknown_endtag(self, tag) + + def handle_pi(self, text): + pass + + def handle_decl(self, text): + pass + + def handle_data(self, text): + if not self.unacceptablestack: + _BaseHTMLProcessor.handle_data(self, text) + +def _sanitizeHTML(htmlSource, encoding): + p = _HTMLSanitizer(encoding) + p.feed(htmlSource) + data = p.output() + if TIDY_MARKUP: + # loop through list of preferred Tidy interfaces looking for one that's installed, + # then set up a common _tidy function to wrap the interface-specific API. 
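+        # (each branch below binds _tidy to a small wrapper with the common
+        # signature _tidy(data, **kwargs), returning the cleaned markup)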
+        _tidy = None
+        for tidy_interface in PREFERRED_TIDY_INTERFACES:
+            try:
+                if tidy_interface == "uTidy":
+                    from tidy import parseString as _utidy
+                    def _tidy(data, **kwargs):
+                        return str(_utidy(data, **kwargs))
+                    break
+                elif tidy_interface == "mxTidy":
+                    from mx.Tidy import Tidy as _mxtidy
+                    def _tidy(data, **kwargs):
+                        nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs)
+                        return data
+                    break
+            except:
+                pass
+        if _tidy:
+            utf8 = type(data) == type(u'')
+            if utf8:
+                data = data.encode('utf-8')
+            data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8")
+            if utf8:
+                data = unicode(data, 'utf-8')
+            if data.count('<body'):
+                data = data.split('<body', 1)[1]
+                if data.count('>'):
+                    data = data.split('>', 1)[1]
+            if data.count('</body'):
+                data = data.split('</body', 1)[0]
+    data = data.strip().replace('\r\n', '\n')
+    return data
+
+class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
+    def http_error_default(self, req, fp, code, msg, headers):
+        if ((code / 100) == 3) and (code != 304):
+            return self.http_error_302(req, fp, code, msg, headers)
+        infourl = urllib.addinfourl(fp, headers, req.get_full_url())
+        infourl.status = code
+        return infourl
+
+    def http_error_302(self, req, fp, code, msg, headers):
+        if headers.dict.has_key('location'):
+            infourl = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
+        else:
+            infourl = urllib.addinfourl(fp, headers, req.get_full_url())
+        if not hasattr(infourl, 'status'):
+            infourl.status = code
+        return infourl
+
+    def http_error_301(self, req, fp, code, msg, headers):
+        if headers.dict.has_key('location'):
+            infourl = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers)
+        else:
+            infourl = urllib.addinfourl(fp, headers, req.get_full_url())
+        if not hasattr(infourl, 'status'):
+            infourl.status = code
+        return infourl
+
+    http_error_300 = http_error_302
+    http_error_303 = http_error_302
+    http_error_307 = http_error_302
+
+    def http_error_401(self, req, fp, code, msg, headers):
+        # Check if
+        # - server requires digest auth, AND
+        # - we tried (unsuccessfully) with basic auth, AND
+        # - we're using Python 2.3.3 or later (digest auth is irreparably broken in earlier versions)
+        # If all conditions hold, parse authentication information
+        # out of the Authorization header we sent the first time
+        # (for the username and password) and the WWW-Authenticate
+        # header the server sent back (for the realm) and retry
+        # the request with the appropriate digest auth headers instead.
+        # This evil genius hack has been brought to you by Aaron Swartz.
+        host = urlparse.urlparse(req.get_full_url())[1]
+        try:
+            assert sys.version.split()[0] >= '2.3.3'
+            assert base64 != None
+            user, passw = base64.decodestring(req.headers['Authorization'].split(' ')[1]).split(':')
+            realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
+            self.add_password(realm, host, user, passw)
+            retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
+            self.reset_retry_count()
+            return retry
+        except:
+            return self.http_error_default(req, fp, code, msg, headers)
+
+def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers):
+    """URL, filename, or string --> stream
+
+    This function lets you define parsers that take any input source
+    (URL, pathname to local or network file, or actual data as a string)
+    and deal with it in a uniform manner.  Returned object is guaranteed
+    to have all the basic stdio read methods (read, readline, readlines).
+    Just .close() the object when you're done with it.
+
+    If the etag argument is supplied, it will be used as the value of an
+    If-None-Match request header.
+
+    If the modified argument is supplied, it must be a tuple of 9 integers
+    as returned by gmtime() in the standard Python time module. This MUST
+    be in GMT (Greenwich Mean Time). The formatted date/time will be used
+    as the value of an If-Modified-Since request header.
+
+    If the agent argument is supplied, it will be used as the value of a
+    User-Agent request header.
+
+    If the referrer argument is supplied, it will be used as the value of a
+    Referer[sic] request header.
+
+    If handlers is supplied, it is a list of handlers used to build a
+    urllib2 opener.
+    """
+
+    if hasattr(url_file_stream_or_string, 'read'):
+        return url_file_stream_or_string
+
+    if url_file_stream_or_string == '-':
+        return sys.stdin
+
+    if urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp'):
+        if not agent:
+            agent = USER_AGENT
+        # test for inline user:password for basic auth
+        auth = None
+        if base64:
+            urltype, rest = urllib.splittype(url_file_stream_or_string)
+            realhost, rest = urllib.splithost(rest)
+            if realhost:
+                user_passwd, realhost = urllib.splituser(realhost)
+                if user_passwd:
+                    url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
+                    auth = base64.encodestring(user_passwd).strip()
+        # try to open with urllib2 (to use optional headers)
+        request = urllib2.Request(url_file_stream_or_string)
+        request.add_header('User-Agent', agent)
+        if etag:
+            request.add_header('If-None-Match', etag)
+        if modified:
+            # format into an RFC 1123-compliant timestamp. We can't use
+            # time.strftime() since the %a and %b directives can be affected
+            # by the current locale, but RFC 2616 states that dates must be
+            # in English.
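            # For illustration (an invented value): with
            # modified = (2004, 2, 28, 18, 14, 55, 5, 59, 0) the header built
            # below is 'Sat, 28 Feb 2004 18:14:55 GMT', assembled by hand from
            # the English name tables so the result is locale-independent.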
+            short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
+            months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
+            request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5]))
+        if referrer:
+            request.add_header('Referer', referrer)
+        if gzip and zlib:
+            request.add_header('Accept-encoding', 'gzip, deflate')
+        elif gzip:
+            request.add_header('Accept-encoding', 'gzip')
+        elif zlib:
+            request.add_header('Accept-encoding', 'deflate')
+        else:
+            request.add_header('Accept-encoding', '')
+        if auth:
+            request.add_header('Authorization', 'Basic %s' % auth)
+        if ACCEPT_HEADER:
+            request.add_header('Accept', ACCEPT_HEADER)
+        request.add_header('A-IM', 'feed') # RFC 3229 support
+        opener = apply(urllib2.build_opener, tuple([_FeedURLHandler()] + handlers))
+        opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
+        try:
+            return opener.open(request)
+        finally:
+            opener.close() # JohnD
+
+    # try to open with native open function (if url_file_stream_or_string is a filename)
+    try:
+        return open(url_file_stream_or_string)
+    except:
+        pass
+
+    # treat url_file_stream_or_string as string
+    return _StringIO(str(url_file_stream_or_string))
+
+_date_handlers = []
+def registerDateHandler(func):
+    '''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
+    _date_handlers.insert(0, func)
+
+# ISO-8601 date parsing routines written by Fazal Majid.
+# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
+# parser is beyond the scope of feedparser and would be a worthwhile addition
+# to the Python library.
+# A single regular expression cannot parse ISO 8601 date formats into groups
+# as the standard is highly irregular (for instance is 030104 2003-01-04 or
+# 0301-04-01), so we use templates instead.
+# Please note the order in templates is significant because we need a
+# greedy match.
+_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-MM', 'YYYY-?OOO',
+                 'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
+                 '-YY-?MM', '-OOO', '-YY',
+                 '--MM-?DD', '--MM',
+                 '---DD',
+                 'CC', '']
+_iso8601_re = [
+    tmpl.replace(
+    'YYYY', r'(?P<year>\d{4})').replace(
+    'YY', r'(?P<year>\d\d)').replace(
+    'MM', r'(?P<month>[01]\d)').replace(
+    'DD', r'(?P<day>[0123]\d)').replace(
+    'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
+    'CC', r'(?P<century>\d\d$)')
+    + r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
+    + r'(:(?P<second>\d{2}))?'
+    + r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
+    for tmpl in _iso8601_tmpl]
+del tmpl
+_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
+del regex
+def _parse_date_iso8601(dateString):
+    '''Parse a variety of ISO-8601-compatible formats like 20040105'''
+    m = None
+    for _iso8601_match in _iso8601_matches:
+        m = _iso8601_match(dateString)
+        if m: break
+    if not m: return
+    if m.span() == (0, 0): return
+    params = m.groupdict()
+    ordinal = params.get('ordinal', 0)
+    if ordinal:
+        ordinal = int(ordinal)
+    else:
+        ordinal = 0
+    year = params.get('year', '--')
+    if not year or year == '--':
+        year = time.gmtime()[0]
+    elif len(year) == 2:
+        # ISO 8601 assumes current century, i.e.
93 -> 2093, NOT 1993 + year = 100 * int(time.gmtime()[0] / 100) + int(year) + else: + year = int(year) + month = params.get('month', '-') + if not month or month == '-': + # ordinals are NOT normalized by mktime, we simulate them + # by setting month=1, day=ordinal + if ordinal: + month = 1 + else: + month = time.gmtime()[1] + month = int(month) + day = params.get('day', 0) + if not day: + # see above + if ordinal: + day = ordinal + elif params.get('century', 0) or \ + params.get('year', 0) or params.get('month', 0): + day = 1 + else: + day = time.gmtime()[2] + else: + day = int(day) + # special case of the century - is the first year of the 21st century + # 2000 or 2001 ? The debate goes on... + if 'century' in params.keys(): + year = (int(params['century']) - 1) * 100 + 1 + # in ISO 8601 most fields are optional + for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']: + if not params.get(field, None): + params[field] = 0 + hour = int(params.get('hour', 0)) + minute = int(params.get('minute', 0)) + second = int(params.get('second', 0)) + # weekday is normalized by mktime(), we can ignore it + weekday = 0 + # daylight savings is complex, but not needed for feedparser's purposes + # as time zones, if specified, include mention of whether it is active + # (e.g. PST vs. PDT, CET). Using -1 is implementation-dependent and + # and most implementations have DST bugs + daylight_savings_flag = 0 + tm = [year, month, day, hour, minute, second, weekday, + ordinal, daylight_savings_flag] + # ISO 8601 time zone adjustments + tz = params.get('tz') + if tz and tz != 'Z': + if tz[0] == '-': + tm[3] += int(params.get('tzhour', 0)) + tm[4] += int(params.get('tzmin', 0)) + elif tz[0] == '+': + tm[3] -= int(params.get('tzhour', 0)) + tm[4] -= int(params.get('tzmin', 0)) + else: + return None + # Python's time.mktime() is a wrapper around the ANSI C mktime(3c) + # which is guaranteed to normalize d/m/y/h/m/s. + # Many implementations have bugs, but we'll pretend they don't. + return time.localtime(time.mktime(tm)) +registerDateHandler(_parse_date_iso8601) + +# 8-bit date handling routines written by ytrewq1. 
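
Each locale-specific parser below plugs into the same registerDateHandler hook
defined above. As a minimal sketch of that extension point (the DD/MM/YYYY
format, the regex, and the _parse_date_dmy name are invented for illustration,
not part of feedparser):

    import re, time

    _dmy_date_re = re.compile(r'(\d{2})/(\d{2})/(\d{4})$')
    def _parse_date_dmy(dateString):
        '''Parse a hypothetical DD/MM/YYYY date into a 9-tuple in GMT'''
        m = _dmy_date_re.match(dateString)
        if not m: return
        day, month, year = int(m.group(1)), int(m.group(2)), int(m.group(3))
        # mktime normalizes the fields; gmtime converts back to a GMT 9-tuple
        return time.gmtime(time.mktime((year, month, day, 0, 0, 0, 0, 0, -1)))
    registerDateHandler(_parse_date_dmy)

Because registerDateHandler inserts at the front of _date_handlers, a handler
registered this way is tried before the built-in ones.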
+_korean_year = u'\ub144' # b3e2 in euc-kr +_korean_month = u'\uc6d4' # bff9 in euc-kr +_korean_day = u'\uc77c' # c0cf in euc-kr +_korean_am = u'\uc624\uc804' # bfc0 c0fc in euc-kr +_korean_pm = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr + +_korean_onblog_date_re = \ + re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \ + (_korean_year, _korean_month, _korean_day)) +_korean_nate_date_re = \ + re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \ + (_korean_am, _korean_pm)) +def _parse_date_onblog(dateString): + '''Parse a string according to the OnBlog 8-bit date format''' + m = _korean_onblog_date_re.match(dateString) + if not m: return + w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \ + {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\ + 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\ + 'zonediff': '+09:00'} + if _debug: sys.stderr.write('OnBlog date parsed as: %s\n' % w3dtfdate) + return _parse_date_w3dtf(w3dtfdate) +registerDateHandler(_parse_date_onblog) + +def _parse_date_nate(dateString): + '''Parse a string according to the Nate 8-bit date format''' + m = _korean_nate_date_re.match(dateString) + if not m: return + hour = int(m.group(5)) + ampm = m.group(4) + if (ampm == _korean_pm): + hour += 12 + hour = str(hour) + if len(hour) == 1: + hour = '0' + hour + w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \ + {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\ + 'hour': hour, 'minute': m.group(6), 'second': m.group(7),\ + 'zonediff': '+09:00'} + if _debug: sys.stderr.write('Nate date parsed as: %s\n' % w3dtfdate) + return _parse_date_w3dtf(w3dtfdate) +registerDateHandler(_parse_date_nate) + +_mssql_date_re = \ + re.compile('(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})(\.\d+)?') +def _parse_date_mssql(dateString): + '''Parse a string according to the MS SQL date format''' + m = _mssql_date_re.match(dateString) + if not m: return + w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \ + {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\ + 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\ + 'zonediff': '+09:00'} + if _debug: sys.stderr.write('MS SQL date parsed as: %s\n' % w3dtfdate) + return _parse_date_w3dtf(w3dtfdate) +registerDateHandler(_parse_date_mssql) + +# Unicode strings for Greek date strings +_greek_months = \ + { \ + u'\u0399\u03b1\u03bd': u'Jan', # c9e1ed in iso-8859-7 + u'\u03a6\u03b5\u03b2': u'Feb', # d6e5e2 in iso-8859-7 + u'\u039c\u03ac\u03ce': u'Mar', # ccdcfe in iso-8859-7 + u'\u039c\u03b1\u03ce': u'Mar', # cce1fe in iso-8859-7 + u'\u0391\u03c0\u03c1': u'Apr', # c1f0f1 in iso-8859-7 + u'\u039c\u03ac\u03b9': u'May', # ccdce9 in iso-8859-7 + u'\u039c\u03b1\u03ca': u'May', # cce1fa in iso-8859-7 + u'\u039c\u03b1\u03b9': u'May', # cce1e9 in iso-8859-7 + u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7 + u'\u0399\u03bf\u03bd': u'Jun', # c9efed in iso-8859-7 + u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7 + u'\u0399\u03bf\u03bb': u'Jul', # c9f9eb in iso-8859-7 + u'\u0391\u03cd\u03b3': u'Aug', # c1fde3 in iso-8859-7 + u'\u0391\u03c5\u03b3': u'Aug', # c1f5e3 in iso-8859-7 + u'\u03a3\u03b5\u03c0': u'Sep', # d3e5f0 in iso-8859-7 + u'\u039f\u03ba\u03c4': u'Oct', # cfeaf4 in iso-8859-7 + u'\u039d\u03bf\u03ad': u'Nov', # cdefdd in iso-8859-7 + u'\u039d\u03bf\u03b5': u'Nov', # cdefe5 in iso-8859-7 + u'\u0394\u03b5\u03ba': 
u'Dec', # c4e5ea in iso-8859-7 + } + +_greek_wdays = \ + { \ + u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7 + u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7 + u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7 + u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7 + u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7 + u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7 + u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7 + } + +_greek_date_format_re = \ + re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)') + +def _parse_date_greek(dateString): + '''Parse a string according to a Greek 8-bit date format.''' + m = _greek_date_format_re.match(dateString) + if not m: return + try: + wday = _greek_wdays[m.group(1)] + month = _greek_months[m.group(3)] + except: + return + rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \ + {'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\ + 'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\ + 'zonediff': m.group(8)} + if _debug: sys.stderr.write('Greek date parsed as: %s\n' % rfc822date) + return _parse_date_rfc822(rfc822date) +registerDateHandler(_parse_date_greek) + +# Unicode strings for Hungarian date strings +_hungarian_months = \ + { \ + u'janu\u00e1r': u'01', # e1 in iso-8859-2 + u'febru\u00e1ri': u'02', # e1 in iso-8859-2 + u'm\u00e1rcius': u'03', # e1 in iso-8859-2 + u'\u00e1prilis': u'04', # e1 in iso-8859-2 + u'm\u00e1ujus': u'05', # e1 in iso-8859-2 + u'j\u00fanius': u'06', # fa in iso-8859-2 + u'j\u00falius': u'07', # fa in iso-8859-2 + u'augusztus': u'08', + u'szeptember': u'09', + u'okt\u00f3ber': u'10', # f3 in iso-8859-2 + u'november': u'11', + u'december': u'12', + } + +_hungarian_date_format_re = \ + re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))') + +def _parse_date_hungarian(dateString): + '''Parse a string according to a Hungarian 8-bit date format.''' + m = _hungarian_date_format_re.match(dateString) + if not m: return + try: + month = _hungarian_months[m.group(2)] + day = m.group(3) + if len(day) == 1: + day = '0' + day + hour = m.group(4) + if len(hour) == 1: + hour = '0' + hour + except: + return + w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \ + {'year': m.group(1), 'month': month, 'day': day,\ + 'hour': hour, 'minute': m.group(5),\ + 'zonediff': m.group(6)} + if _debug: sys.stderr.write('Hungarian date parsed as: %s\n' % w3dtfdate) + return _parse_date_w3dtf(w3dtfdate) +registerDateHandler(_parse_date_hungarian) + +# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by +# Drake and licensed under the Python license. 
Removed all range checking
+# for month, day, hour, minute, and second, since mktime will normalize
+# these later
+def _parse_date_w3dtf(dateString):
+    def __extract_date(m):
+        year = int(m.group('year'))
+        if year < 100:
+            year = 100 * int(time.gmtime()[0] / 100) + int(year)
+        if year < 1000:
+            return 0, 0, 0
+        julian = m.group('julian')
+        if julian:
+            julian = int(julian)
+            month = julian / 30 + 1
+            day = julian % 30 + 1
+            jday = None
+            while jday != julian:
+                t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
+                jday = time.gmtime(t)[-2]
+                diff = abs(jday - julian)
+                if jday > julian:
+                    if diff < day:
+                        day = day - diff
+                    else:
+                        month = month - 1
+                        day = 31
+                elif jday < julian:
+                    if day + diff < 28:
+                        day = day + diff
+                    else:
+                        month = month + 1
+            return year, month, day
+        month = m.group('month')
+        day = 1
+        if month is None:
+            month = 1
+        else:
+            month = int(month)
+            day = m.group('day')
+            if day:
+                day = int(day)
+            else:
+                day = 1
+        return year, month, day
+
+    def __extract_time(m):
+        if not m:
+            return 0, 0, 0
+        hours = m.group('hours')
+        if not hours:
+            return 0, 0, 0
+        hours = int(hours)
+        minutes = int(m.group('minutes'))
+        seconds = m.group('seconds')
+        if seconds:
+            seconds = int(seconds)
+        else:
+            seconds = 0
+        return hours, minutes, seconds
+
+    def __extract_tzd(m):
+        '''Return the Time Zone Designator as an offset in seconds from UTC.'''
+        if not m:
+            return 0
+        tzd = m.group('tzd')
+        if not tzd:
+            return 0
+        if tzd == 'Z':
+            return 0
+        hours = int(m.group('tzdhours'))
+        minutes = m.group('tzdminutes')
+        if minutes:
+            minutes = int(minutes)
+        else:
+            minutes = 0
+        offset = (hours*60 + minutes) * 60
+        if tzd[0] == '+':
+            return -offset
+        return offset
+
+    __date_re = ('(?P<year>\d\d\d\d)'
+                 '(?:(?P<dsep>-|)'
+                 '(?:(?P<julian>\d\d\d)'
+                 '|(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?))?')
+    __tzd_re = '(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)'
+    __tzd_rx = re.compile(__tzd_re)
+    __time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
+                 '(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?'
+                 + __tzd_re)
+    __datetime_re = '%s(?:T%s)?' % (__date_re, __time_re)
+    __datetime_rx = re.compile(__datetime_re)
+    m = __datetime_rx.match(dateString)
+    if (m is None) or (m.group() != dateString): return
+    gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
+    if gmt[0] == 0: return
+    return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone)
+registerDateHandler(_parse_date_w3dtf)
+
+def _parse_date_rfc822(dateString):
+    '''Parse an RFC822, RFC1123, RFC2822, or asctime-style date'''
+    data = dateString.split()
+    if data[0][-1] in (',', '.') or data[0].lower() in rfc822._daynames:
+        del data[0]
+    if len(data) == 4:
+        s = data[3]
+        i = s.find('+')
+        if i > 0:
+            data[3:] = [s[:i], s[i+1:]]
+        else:
+            data.append('')
+        dateString = " ".join(data)
+    if len(data) < 5:
+        dateString += ' 00:00:00 GMT'
+    tm = rfc822.parsedate_tz(dateString)
+    if tm:
+        return time.gmtime(rfc822.mktime_tz(tm))
+# rfc822.py defines several time zones, but we define some extra ones.
+# 'ET' is equivalent to 'EST', etc.
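# For example (invented input): with the 'ET' entry added below,
#     _parse_date_rfc822('Thu, 01 Jan 2004 19:48:21 ET')
# resolves like 'EST' (-0500) and comes back as the GMT 9-tuple for
# 2004-01-02 00:48:21; without the extra zones, rfc822.parsedate_tz()
# would yield no usable offset for 'ET'.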
+_additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800} +rfc822._timezones.update(_additional_timezones) +registerDateHandler(_parse_date_rfc822) + +def _parse_date(dateString): + '''Parses a variety of date formats into a 9-tuple in GMT''' + for handler in _date_handlers: + try: + date9tuple = handler(dateString) + if not date9tuple: continue + if len(date9tuple) != 9: + if _debug: sys.stderr.write('date handler function must return 9-tuple\n') + raise ValueError + map(int, date9tuple) + return date9tuple + except Exception, e: + if _debug: sys.stderr.write('%s raised %s\n' % (handler.__name__, repr(e))) + pass + return None + +def _getCharacterEncoding(http_headers, xml_data): + '''Get the character encoding of the XML document + + http_headers is a dictionary + xml_data is a raw string (not Unicode) + + This is so much trickier than it sounds, it's not even funny. + According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type + is application/xml, application/*+xml, + application/xml-external-parsed-entity, or application/xml-dtd, + the encoding given in the charset parameter of the HTTP Content-Type + takes precedence over the encoding given in the XML prefix within the + document, and defaults to 'utf-8' if neither are specified. But, if + the HTTP Content-Type is text/xml, text/*+xml, or + text/xml-external-parsed-entity, the encoding given in the XML prefix + within the document is ALWAYS IGNORED and only the encoding given in + the charset parameter of the HTTP Content-Type header should be + respected, and it defaults to 'us-ascii' if not specified. + + Furthermore, discussion on the atom-syntax mailing list with the + author of RFC 3023 leads me to the conclusion that any document + served with a Content-Type of text/* and no charset parameter + must be treated as us-ascii. (We now do this.) And also that it + must always be flagged as non-well-formed. (We now do this too.) + + If Content-Type is unspecified (input was local file or non-HTTP source) + or unrecognized (server just got it totally wrong), then go by the + encoding given in the XML prefix of the document and default to + 'iso-8859-1' as per the HTTP specification (RFC 2616). + + Then, assuming we didn't find a character encoding in the HTTP headers + (and the HTTP Content-type allowed us to look in the body), we need + to sniff the first few bytes of the XML data and try to determine + whether the encoding is ASCII-compatible. Section F of the XML + specification shows the way here: + http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info + + If the sniffed encoding is not ASCII-compatible, we need to make it + ASCII compatible so that we can sniff further into the XML declaration + to find the encoding attribute, which will tell us the true encoding. + + Of course, none of this guarantees that we will be able to parse the + feed in the declared character encoding (assuming it was declared + correctly, which many are not). CJKCodecs and iconv_codec help a lot; + you should definitely install them if you can. 
+ http://cjkpython.i18n.org/ + ''' + + def _parseHTTPContentType(content_type): + '''takes HTTP Content-Type header and returns (content type, charset) + + If no charset is specified, returns (content type, '') + If no content type is specified, returns ('', '') + Both return parameters are guaranteed to be lowercase strings + ''' + content_type = content_type or '' + content_type, params = cgi.parse_header(content_type) + return content_type, params.get('charset', '').replace("'", '') + + sniffed_xml_encoding = '' + xml_encoding = '' + true_encoding = '' + http_content_type, http_encoding = _parseHTTPContentType(http_headers.get('content-type')) + # Must sniff for non-ASCII-compatible character encodings before + # searching for XML declaration. This heuristic is defined in + # section F of the XML specification: + # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info + try: + if xml_data[:4] == '\x4c\x6f\xa7\x94': + # EBCDIC + xml_data = _ebcdic_to_ascii(xml_data) + elif xml_data[:4] == '\x00\x3c\x00\x3f': + # UTF-16BE + sniffed_xml_encoding = 'utf-16be' + xml_data = unicode(xml_data, 'utf-16be').encode('utf-8') + elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') and (xml_data[2:4] != '\x00\x00'): + # UTF-16BE with BOM + sniffed_xml_encoding = 'utf-16be' + xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8') + elif xml_data[:4] == '\x3c\x00\x3f\x00': + # UTF-16LE + sniffed_xml_encoding = 'utf-16le' + xml_data = unicode(xml_data, 'utf-16le').encode('utf-8') + elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and (xml_data[2:4] != '\x00\x00'): + # UTF-16LE with BOM + sniffed_xml_encoding = 'utf-16le' + xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8') + elif xml_data[:4] == '\x00\x00\x00\x3c': + # UTF-32BE + sniffed_xml_encoding = 'utf-32be' + xml_data = unicode(xml_data, 'utf-32be').encode('utf-8') + elif xml_data[:4] == '\x3c\x00\x00\x00': + # UTF-32LE + sniffed_xml_encoding = 'utf-32le' + xml_data = unicode(xml_data, 'utf-32le').encode('utf-8') + elif xml_data[:4] == '\x00\x00\xfe\xff': + # UTF-32BE with BOM + sniffed_xml_encoding = 'utf-32be' + xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8') + elif xml_data[:4] == '\xff\xfe\x00\x00': + # UTF-32LE with BOM + sniffed_xml_encoding = 'utf-32le' + xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8') + elif xml_data[:3] == '\xef\xbb\xbf': + # UTF-8 with BOM + sniffed_xml_encoding = 'utf-8' + xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8') + else: + # ASCII-compatible + pass + xml_encoding_match = re.compile('^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data) + except: + xml_encoding_match = None + if xml_encoding_match: + xml_encoding = xml_encoding_match.groups()[0].lower() + if sniffed_xml_encoding and (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')): + xml_encoding = sniffed_xml_encoding + acceptable_content_type = 0 + application_content_types = ('application/xml', 'application/xml-dtd', 'application/xml-external-parsed-entity') + text_content_types = ('text/xml', 'text/xml-external-parsed-entity') + if (http_content_type in application_content_types) or \ + (http_content_type.startswith('application/') and http_content_type.endswith('+xml')): + acceptable_content_type = 1 + true_encoding = http_encoding or xml_encoding or 'utf-8' + elif (http_content_type in text_content_types) or \ + (http_content_type.startswith('text/')) and 
http_content_type.endswith('+xml'):
+        acceptable_content_type = 1
+        true_encoding = http_encoding or 'us-ascii'
+    elif http_content_type.startswith('text/'):
+        true_encoding = http_encoding or 'us-ascii'
+    elif http_headers and (not http_headers.has_key('content-type')):
+        true_encoding = xml_encoding or 'iso-8859-1'
+    else:
+        true_encoding = xml_encoding or 'utf-8'
+    return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type
+
+def _toUTF8(data, encoding):
+    '''Changes an XML data stream on the fly to specify a new encoding
+
+    data is a raw sequence of bytes (not Unicode) that is presumed to be in %encoding already
+    encoding is a string recognized by encodings.aliases
+    '''
+    if _debug: sys.stderr.write('entering _toUTF8, trying encoding %s\n' % encoding)
+    # strip Byte Order Mark (if present)
+    if (len(data) >= 4) and (data[:2] == '\xfe\xff') and (data[2:4] != '\x00\x00'):
+        if _debug:
+            sys.stderr.write('stripping BOM\n')
+            if encoding != 'utf-16be':
+                sys.stderr.write('trying utf-16be instead\n')
+        encoding = 'utf-16be'
+        data = data[2:]
+    elif (len(data) >= 4) and (data[:2] == '\xff\xfe') and (data[2:4] != '\x00\x00'):
+        if _debug:
+            sys.stderr.write('stripping BOM\n')
+            if encoding != 'utf-16le':
+                sys.stderr.write('trying utf-16le instead\n')
+        encoding = 'utf-16le'
+        data = data[2:]
+    elif data[:3] == '\xef\xbb\xbf':
+        if _debug:
+            sys.stderr.write('stripping BOM\n')
+            if encoding != 'utf-8':
+                sys.stderr.write('trying utf-8 instead\n')
+        encoding = 'utf-8'
+        data = data[3:]
+    elif data[:4] == '\x00\x00\xfe\xff':
+        if _debug:
+            sys.stderr.write('stripping BOM\n')
+            if encoding != 'utf-32be':
+                sys.stderr.write('trying utf-32be instead\n')
+        encoding = 'utf-32be'
+        data = data[4:]
+    elif data[:4] == '\xff\xfe\x00\x00':
+        if _debug:
+            sys.stderr.write('stripping BOM\n')
+            if encoding != 'utf-32le':
+                sys.stderr.write('trying utf-32le instead\n')
+        encoding = 'utf-32le'
+        data = data[4:]
+    newdata = unicode(data, encoding)
+    if _debug: sys.stderr.write('successfully converted %s data to unicode\n' % encoding)
+    declmatch = re.compile('^<\?xml[^>]*?>')
+    newdecl = '''<?xml version='1.0' encoding='utf-8'?>'''
+    if declmatch.search(newdata):
+        newdata = declmatch.sub(newdecl, newdata)
+    else:
+        newdata = newdecl + u'\n' + newdata
+    return newdata.encode('utf-8')
+
+def _stripDoctype(data):
+    '''Strips DOCTYPE from XML document, returns (rss_version, stripped_data)
+
+    rss_version may be 'rss091n' or None
+    stripped_data is the same XML document, minus the DOCTYPE
+    '''
+    entity_pattern = re.compile(r'<!ENTITY([^>]*?)>', re.MULTILINE)
+    data = entity_pattern.sub('', data)
+    doctype_pattern = re.compile(r'<!DOCTYPE([^>]*?)>', re.MULTILINE)
+    doctype_results = doctype_pattern.findall(data)
+    doctype = doctype_results and doctype_results[0] or ''
+    if doctype.lower().count('netscape'):
+        version = 'rss091n'
+    else:
+        version = None
+    data = doctype_pattern.sub('', data)
+    return version, data
+
+def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=[]):
+    '''Parse a feed from a URL, file, stream, or string'''
+    result = FeedParserDict()
+    result['feed'] = FeedParserDict()
+    result['entries'] = []
+    if _XML_AVAILABLE:
+        result['bozo'] = 0
+    if type(handlers) == types.InstanceType:
+        handlers = [handlers]
+    try:
+        f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers)
+        data = f.read()
+    except Exception, e:
+        result['bozo'] = 1
+        result['bozo_exception'] = e
+        data = ''
+        f = None
+
+    # if feed is gzip-compressed, decompress it
+    if f
and data and hasattr(f, 'headers'): + if gzip and f.headers.get('content-encoding', '') == 'gzip': + try: + data = gzip.GzipFile(fileobj=_StringIO(data)).read() + except Exception, e: + # Some feeds claim to be gzipped but they're not, so + # we get garbage. Ideally, we should re-request the + # feed without the 'Accept-encoding: gzip' header, + # but we don't. + result['bozo'] = 1 + result['bozo_exception'] = e + data = '' + elif zlib and f.headers.get('content-encoding', '') == 'deflate': + try: + data = zlib.decompress(data, -zlib.MAX_WBITS) + except Exception, e: + result['bozo'] = 1 + result['bozo_exception'] = e + data = '' + + # save HTTP headers + if hasattr(f, 'info'): + info = f.info() + result['etag'] = info.getheader('ETag') + last_modified = info.getheader('Last-Modified') + if last_modified: + result['modified'] = _parse_date(last_modified) + if hasattr(f, 'url'): + result['href'] = f.url + result['status'] = 200 + if hasattr(f, 'status'): + result['status'] = f.status + if hasattr(f, 'headers'): + result['headers'] = f.headers.dict + if hasattr(f, 'close'): + f.close() + + # there are four encodings to keep track of: + # - http_encoding is the encoding declared in the Content-Type HTTP header + # - xml_encoding is the encoding declared in the ; changed +# project name +#2.5 - 7/25/2003 - MAP - changed to Python license (all contributors agree); +# removed unnecessary urllib code -- urllib2 should always be available anyway; +# return actual url, status, and full HTTP headers (as result['url'], +# result['status'], and result['headers']) if parsing a remote feed over HTTP -- +# this should pass all the HTTP tests at ; +# added the latest namespace-of-the-week for RSS 2.0 +#2.5.1 - 7/26/2003 - RMK - clear opener.addheaders so we only send our custom +# User-Agent (otherwise urllib2 sends two, which confuses some servers) +#2.5.2 - 7/28/2003 - MAP - entity-decode inline xml properly; added support for +# inline and as used in some RSS 2.0 feeds +#2.5.3 - 8/6/2003 - TvdV - patch to track whether we're inside an image or +# textInput, and also to return the character encoding (if specified) +#2.6 - 1/1/2004 - MAP - dc:author support (MarekK); fixed bug tracking +# nested divs within content (JohnD); fixed missing sys import (JohanS); +# fixed regular expression to capture XML character encoding (Andrei); +# added support for Atom 0.3-style links; fixed bug with textInput tracking; +# added support for cloud (MartijnP); added support for multiple +# category/dc:subject (MartijnP); normalize content model: 'description' gets +# description (which can come from description, summary, or full content if no +# description), 'content' gets dict of base/language/type/value (which can come +# from content:encoded, xhtml:body, content, or fullitem); +# fixed bug matching arbitrary Userland namespaces; added xml:base and xml:lang +# tracking; fixed bug tracking unknown tags; fixed bug tracking content when +# element is not in default namespace (like Pocketsoap feed); +# resolve relative URLs in link, guid, docs, url, comments, wfw:comment, +# wfw:commentRSS; resolve relative URLs within embedded HTML markup in +# description, xhtml:body, content, content:encoded, title, subtitle, +# summary, info, tagline, and copyright; added support for pingback and +# trackback namespaces +#2.7 - 1/5/2004 - MAP - really added support for trackback and pingback +# namespaces, as opposed to 2.6 when I said I did but didn't really; +# sanitize HTML markup within some elements; added mxTidy support (if +# 
installed) to tidy HTML markup within some elements; fixed indentation +# bug in _parse_date (FazalM); use socket.setdefaulttimeout if available +# (FazalM); universal date parsing and normalization (FazalM): 'created', modified', +# 'issued' are parsed into 9-tuple date format and stored in 'created_parsed', +# 'modified_parsed', and 'issued_parsed'; 'date' is duplicated in 'modified' +# and vice-versa; 'date_parsed' is duplicated in 'modified_parsed' and vice-versa +#2.7.1 - 1/9/2004 - MAP - fixed bug handling " and '. fixed memory +# leak not closing url opener (JohnD); added dc:publisher support (MarekK); +# added admin:errorReportsTo support (MarekK); Python 2.1 dict support (MarekK) +#2.7.4 - 1/14/2004 - MAP - added workaround for improperly formed
<br/> tags in
+# encoded HTML (skadz); fixed unicode handling in normalize_attrs (ChrisL);
+# fixed relative URI processing for guid (skadz); added ICBM support; added
+# base64 support
+#2.7.5 - 1/15/2004 - MAP - added workaround for malformed DOCTYPE (seen on many
+# blogspot.com sites); added _debug variable
+#2.7.6 - 1/16/2004 - MAP - fixed bug with StringIO importing
+#3.0b3 - 1/23/2004 - MAP - parse entire feed with real XML parser (if available);
+# added several new supported namespaces; fixed bug tracking naked markup in
+# description; added support for enclosure; added support for source; re-added
+# support for cloud which got dropped somehow; added support for expirationDate
+#3.0b4 - 1/26/2004 - MAP - fixed xml:lang inheritance; fixed multiple bugs tracking
+# xml:base URI, one for documents that don't define one explicitly and one for
+# documents that define an outer and an inner xml:base that goes out of scope
+# before the end of the document
+#3.0b5 - 1/26/2004 - MAP - fixed bug parsing multiple links at feed level
+#3.0b6 - 1/27/2004 - MAP - added feed type and version detection, result['version']
+# will be one of SUPPORTED_VERSIONS.keys() or empty string if unrecognized;
+# added support for creativeCommons:license and cc:license; added support for
+# full Atom content model in title, tagline, info, copyright, summary; fixed bug
+# with gzip encoding (not always telling server we support it when we do)
+#3.0b7 - 1/28/2004 - MAP - support Atom-style author element in author_detail
+# (dictionary of 'name', 'url', 'email'); map author to author_detail if author
+# contains name + email address
+#3.0b8 - 1/28/2004 - MAP - added support for contributor
+#3.0b9 - 1/29/2004 - MAP - fixed check for presence of dict function; added
+# support for summary
+#3.0b10 - 1/31/2004 - MAP - incorporated ISO-8601 date parsing routines from
+# xml.util.iso8601
+#3.0b11 - 2/2/2004 - MAP - added 'rights' to list of elements that can contain
+# dangerous markup; fiddled with decodeEntities (not right); liberalized
+# date parsing even further
+#3.0b12 - 2/6/2004 - MAP - fiddled with decodeEntities (still not right);
+# added support to Atom 0.2 subtitle; added support for Atom content model
+# in copyright; better sanitizing of dangerous HTML elements with end tags
+# (script, frameset)
+#3.0b13 - 2/8/2004 - MAP - better handling of empty HTML tags (br, hr, img,
+# etc.) in embedded markup, in either HTML or XHTML form (<br>, <br/>, <br />
) +#3.0b14 - 2/8/2004 - MAP - fixed CDATA handling in non-wellformed feeds under +# Python 2.1 +#3.0b15 - 2/11/2004 - MAP - fixed bug resolving relative links in wfw:commentRSS; +# fixed bug capturing author and contributor URL; fixed bug resolving relative +# links in author and contributor URL; fixed bug resolvin relative links in +# generator URL; added support for recognizing RSS 1.0; passed Simon Fell's +# namespace tests, and included them permanently in the test suite with his +# permission; fixed namespace handling under Python 2.1 +#3.0b16 - 2/12/2004 - MAP - fixed support for RSS 0.90 (broken in b15) +#3.0b17 - 2/13/2004 - MAP - determine character encoding as per RFC 3023 +#3.0b18 - 2/17/2004 - MAP - always map description to summary_detail (Andrei); +# use libxml2 (if available) +#3.0b19 - 3/15/2004 - MAP - fixed bug exploding author information when author +# name was in parentheses; removed ultra-problematic mxTidy support; patch to +# workaround crash in PyXML/expat when encountering invalid entities +# (MarkMoraes); support for textinput/textInput +#3.0b20 - 4/7/2004 - MAP - added CDF support +#3.0b21 - 4/14/2004 - MAP - added Hot RSS support +#3.0b22 - 4/19/2004 - MAP - changed 'channel' to 'feed', 'item' to 'entries' in +# results dict; changed results dict to allow getting values with results.key +# as well as results[key]; work around embedded illformed HTML with half +# a DOCTYPE; work around malformed Content-Type header; if character encoding +# is wrong, try several common ones before falling back to regexes (if this +# works, bozo_exception is set to CharacterEncodingOverride); fixed character +# encoding issues in BaseHTMLProcessor by tracking encoding and converting +# from Unicode to raw strings before feeding data to sgmllib.SGMLParser; +# convert each value in results to Unicode (if possible), even if using +# regex-based parsing +#3.0b23 - 4/21/2004 - MAP - fixed UnicodeDecodeError for feeds that contain +# high-bit characters in attributes in embedded HTML in description (thanks +# Thijs van de Vossen); moved guid, date, and date_parsed to mapped keys in +# FeedParserDict; tweaked FeedParserDict.has_key to return True if asking +# about a mapped key +#3.0fc1 - 4/23/2004 - MAP - made results.entries[0].links[0] and +# results.entries[0].enclosures[0] into FeedParserDict; fixed typo that could +# cause the same encoding to be tried twice (even if it failed the first time); +# fixed DOCTYPE stripping when DOCTYPE contained entity declarations; +# better textinput and image tracking in illformed RSS 1.0 feeds +#3.0fc2 - 5/10/2004 - MAP - added and passed Sam's amp tests; added and passed +# my blink tag tests +#3.0fc3 - 6/18/2004 - MAP - fixed bug in _changeEncodingDeclaration that +# failed to parse utf-16 encoded feeds; made source into a FeedParserDict; +# duplicate admin:generatorAgent/@rdf:resource in generator_detail.url; +# added support for image; refactored parse() fallback logic to try other +# encodings if SAX parsing fails (previously it would only try other encodings +# if re-encoding failed); remove unichr madness in normalize_attrs now that +# we're properly tracking encoding in and out of BaseHTMLProcessor; set +# feed.language from root-level xml:lang; set entry.id from rdf:about; +# send Accept header +#3.0 - 6/21/2004 - MAP - don't try iso-8859-1 (can't distinguish between +# iso-8859-1 and windows-1252 anyway, and most incorrectly marked feeds are +# windows-1252); fixed regression that could cause the same encoding to be +# tried twice (even 
if it failed the first time) +#3.0.1 - 6/22/2004 - MAP - default to us-ascii for all text/* content types; +# recover from malformed content-type header parameter with no equals sign +# ('text/xml; charset:iso-8859-1') +#3.1 - 6/28/2004 - MAP - added and passed tests for converting HTML entities +# to Unicode equivalents in illformed feeds (aaronsw); added and +# passed tests for converting character entities to Unicode equivalents +# in illformed feeds (aaronsw); test for valid parsers when setting +# XML_AVAILABLE; make version and encoding available when server returns +# a 304; add handlers parameter to pass arbitrary urllib2 handlers (like +# digest auth or proxy support); add code to parse username/password +# out of url and send as basic authentication; expose downloading-related +# exceptions in bozo_exception (aaronsw); added __contains__ method to +# FeedParserDict (aaronsw); added publisher_detail (aaronsw) +#3.2 - 7/3/2004 - MAP - use cjkcodecs and iconv_codec if available; always +# convert feed to UTF-8 before passing to XML parser; completely revamped +# logic for determining character encoding and attempting XML parsing +# (much faster); increased default timeout to 20 seconds; test for presence +# of Location header on redirects; added tests for many alternate character +# encodings; support various EBCDIC encodings; support UTF-16BE and +# UTF16-LE with or without a BOM; support UTF-8 with a BOM; support +# UTF-32BE and UTF-32LE with or without a BOM; fixed crashing bug if no +# XML parsers are available; added support for 'Content-encoding: deflate'; +# send blank 'Accept-encoding: ' header if neither gzip nor zlib modules +# are available +#3.3 - 7/15/2004 - MAP - optimize EBCDIC to ASCII conversion; fix obscure +# problem tracking xml:base and xml:lang if element declares it, child +# doesn't, first grandchild redeclares it, and second grandchild doesn't; +# refactored date parsing; defined public registerDateHandler so callers +# can add support for additional date formats at runtime; added support +# for OnBlog, Nate, MSSQL, Greek, and Hungarian dates (ytrewq1); added +# zopeCompatibilityHack() which turns FeedParserDict into a regular +# dictionary, required for Zope compatibility, and also makes command- +# line debugging easier because pprint module formats real dictionaries +# better than dictionary-like objects; added NonXMLContentType exception, +# which is stored in bozo_exception when a feed is served with a non-XML +# media type such as 'text/plain'; respect Content-Language as default +# language if not xml:lang is present; cloud dict is now FeedParserDict; +# generator dict is now FeedParserDict; better tracking of xml:lang, +# including support for xml:lang='' to unset the current language; +# recognize RSS 1.0 feeds even when RSS 1.0 namespace is not the default +# namespace; don't overwrite final status on redirects (scenarios: +# redirecting to a URL that returns 304, redirecting to a URL that +# redirects to another URL with a different type of redirect); add +# support for HTTP 303 redirects +#4.0 - MAP - support for relative URIs in xml:base attribute; fixed +# encoding issue with mxTidy (phopkins); preliminary support for RFC 3229; +# support for Atom 1.0; support for iTunes extensions; new 'tags' for +# categories/keywords/etc. 
as array of dict +# {'term': term, 'scheme': scheme, 'label': label} to match Atom 1.0 +# terminology; parse RFC 822-style dates with no time; lots of other +# bug fixes +#4.1 - MAP - removed socket timeout; added support for chardet library diff --git a/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet/htmltmpl.py b/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet/htmltmpl.py new file mode 100755 index 0000000..be6e41b --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet/htmltmpl.py @@ -0,0 +1,1480 @@ + +""" A templating engine for separation of code and HTML. + + The documentation of this templating engine is separated to two parts: + + 1. Description of the templating language. + + 2. Documentation of classes and API of this module that provides + a Python implementation of the templating language. + + All the documentation can be found in 'doc' directory of the + distribution tarball or at the homepage of the engine. + Latest versions of this module are also available at that website. + + You can use and redistribute this module under conditions of the + GNU General Public License that can be found either at + [ http://www.gnu.org/ ] or in file "LICENSE" contained in the + distribution tarball of this module. + + Copyright (c) 2001 Tomas Styblo, tripie@cpan.org + + @name htmltmpl + @version 1.22 + @author-name Tomas Styblo + @author-email tripie@cpan.org + @website http://htmltmpl.sourceforge.net/ + @license-name GNU GPL + @license-url http://www.gnu.org/licenses/gpl.html +""" + +__version__ = 1.22 +__author__ = "Tomas Styblo (tripie@cpan.org)" + +# All imported modules are part of the standard Python library. + +from types import * +import re +import os +import os.path +import pprint # only for debugging +import sys +import copy +import cgi # for HTML escaping of variables +import urllib # for URL escaping of variables +import cPickle # for template compilation +import gettext + +INCLUDE_DIR = "inc" + +# Total number of possible parameters. +# Increment if adding a parameter to any statement. +PARAMS_NUMBER = 3 + +# Relative positions of parameters in TemplateCompiler.tokenize(). +PARAM_NAME = 1 +PARAM_ESCAPE = 2 +PARAM_GLOBAL = 3 +PARAM_GETTEXT_STRING = 1 + +# Find a way to lock files. Currently implemented only for UNIX and windows. +LOCKTYPE_FCNTL = 1 +LOCKTYPE_MSVCRT = 2 +LOCKTYPE = None +try: + import fcntl +except: + try: + import msvcrt + except: + LOCKTYPE = None + else: + LOCKTYPE = LOCKTYPE_MSVCRT +else: + LOCKTYPE = LOCKTYPE_FCNTL +LOCK_EX = 1 +LOCK_SH = 2 +LOCK_UN = 3 + +############################################## +# CLASS: TemplateManager # +############################################## + +class TemplateManager: + """ Class that manages compilation and precompilation of templates. + + You should use this class whenever you work with templates + that are stored in a file. The class can create a compiled + template and transparently manage its precompilation. It also + keeps the precompiled templates up-to-date by modification times + comparisons. + """ + + def __init__(self, include=1, max_include=5, precompile=1, comments=1, + gettext=0, debug=0): + """ Constructor. + + @header + __init__(include=1, max_include=5, precompile=1, comments=1, + gettext=0, debug=0) + + @param include Enable or disable included templates. + This optional parameter can be used to enable or disable + TMPL_INCLUDE inclusion of templates. Disabling of + inclusion can improve performance a bit. The inclusion is + enabled by default. 
+ + @param max_include Maximum depth of nested inclusions. + This optional parameter can be used to specify maximum depth of + nested TMPL_INCLUDE inclusions. It defaults to 5. + This setting prevents infinite recursive inclusions. + + @param precompile Enable or disable precompilation of templates. + This optional parameter can be used to enable or disable + creation and usage of precompiled templates. + + A precompiled template is saved to the same directory in + which the main template file is located. You need write + permissions to that directory. + + Precompilation provides a significant performance boost because + it's not necessary to parse the templates over and over again. + The boost is especially noticeable when templates that include + other templates are used. + + Comparison of modification times of the main template and all + included templates is used to ensure that the precompiled + templates are up-to-date. Templates are also recompiled if the + htmltmpl module is updated. + + The TemplateErrorexception is raised when the precompiled + template cannot be saved. Precompilation is enabled by default. + + Precompilation is available only on UNIX and Windows platforms, + because proper file locking which is necessary to ensure + multitask safe behaviour is platform specific and is not + implemented for other platforms. Attempts to enable precompilation + on the other platforms result in raise of the + TemplateError exception. + + @param comments Enable or disable template comments. + This optional parameter can be used to enable or disable + template comments. + Disabling of the comments can improve performance a bit. + Comments are enabled by default. + + @param gettext Enable or disable gettext support. + + @param debug Enable or disable debugging messages. + This optional parameter is a flag that can be used to enable + or disable debugging messages which are printed to the standard + error output. The debugging messages are disabled by default. + """ + # Save the optional parameters. + # These values are not modified by any method. + self._include = include + self._max_include = max_include + self._precompile = precompile + self._comments = comments + self._gettext = gettext + self._debug = debug + + # Find what module to use to lock files. + # File locking is necessary for the 'precompile' feature to be + # multitask/thread safe. Currently it works only on UNIX + # and Windows. Anyone willing to implement it on Mac ? + if precompile and not LOCKTYPE: + raise TemplateError, "Template precompilation is not "\ + "available on this platform." + self.DEB("INIT DONE") + + def prepare(self, file): + """ Preprocess, parse, tokenize and compile the template. + + If precompilation is enabled then this method tries to load + a precompiled form of the template from the same directory + in which the template source file is located. If it succeeds, + then it compares modification times stored in the precompiled + form to modification times of source files of the template, + including source files of all templates included via the + TMPL_INCLUDE statements. If any of the modification times + differs, then the template is recompiled and the precompiled + form updated. + + If precompilation is disabled, then this method parses and + compiles the template. + + @header prepare(file) + + @return Compiled template. + The methods returns an instance of the Template class + which is a compiled form of the template. This instance can be + used as input for the TemplateProcessor. 
+ + @param file Path to the template file to prepare. + The method looks for the template file in current directory + if the parameter is a relative path. All included templates must + be placed in subdirectory 'inc' of the + directory in which the main template file is located. + """ + compiled = None + if self._precompile: + if self.is_precompiled(file): + try: + precompiled = self.load_precompiled(file) + except PrecompiledError, template: + print >> sys.stderr, "Htmltmpl: bad precompiled "\ + "template '%s' removed" % template + compiled = self.compile(file) + self.save_precompiled(compiled) + else: + precompiled.debug(self._debug) + compile_params = (self._include, self._max_include, + self._comments, self._gettext) + if precompiled.is_uptodate(compile_params): + self.DEB("PRECOMPILED: UPTODATE") + compiled = precompiled + else: + self.DEB("PRECOMPILED: NOT UPTODATE") + compiled = self.update(precompiled) + else: + self.DEB("PRECOMPILED: NOT PRECOMPILED") + compiled = self.compile(file) + self.save_precompiled(compiled) + else: + self.DEB("PRECOMPILATION DISABLED") + compiled = self.compile(file) + return compiled + + def update(self, template): + """ Update (recompile) a compiled template. + + This method recompiles a template compiled from a file. + If precompilation is enabled then the precompiled form saved on + disk is also updated. + + @header update(template) + + @return Recompiled template. + It's ensured that the returned template is up-to-date. + + @param template A compiled template. + This parameter should be an instance of the Template + class, created either by the TemplateManager or by the + TemplateCompiler. The instance must represent a template + compiled from a file on disk. + """ + self.DEB("UPDATE") + updated = self.compile(template.file()) + if self._precompile: + self.save_precompiled(updated) + return updated + + ############################################## + # PRIVATE METHODS # + ############################################## + + def DEB(self, str): + """ Print debugging message to stderr if debugging is enabled. + @hidden + """ + if self._debug: print >> sys.stderr, str + + def lock_file(self, file, lock): + """ Provide platform independent file locking. + @hidden + """ + fd = file.fileno() + if LOCKTYPE == LOCKTYPE_FCNTL: + if lock == LOCK_SH: + fcntl.flock(fd, fcntl.LOCK_SH) + elif lock == LOCK_EX: + fcntl.flock(fd, fcntl.LOCK_EX) + elif lock == LOCK_UN: + fcntl.flock(fd, fcntl.LOCK_UN) + else: + raise TemplateError, "BUG: bad lock in lock_file" + elif LOCKTYPE == LOCKTYPE_MSVCRT: + if lock == LOCK_SH: + # msvcrt does not support shared locks :-( + msvcrt.locking(fd, msvcrt.LK_LOCK, 1) + elif lock == LOCK_EX: + msvcrt.locking(fd, msvcrt.LK_LOCK, 1) + elif lock == LOCK_UN: + msvcrt.locking(fd, msvcrt.LK_UNLCK, 1) + else: + raise TemplateError, "BUG: bad lock in lock_file" + else: + raise TemplateError, "BUG: bad locktype in lock_file" + + def compile(self, file): + """ Compile the template. + @hidden + """ + return TemplateCompiler(self._include, self._max_include, + self._comments, self._gettext, + self._debug).compile(file) + + def is_precompiled(self, file): + """ Return true if the template is already precompiled on the disk. + This method doesn't check whether the compiled template is + uptodate. + @hidden + """ + filename = file + "c" # "template.tmplc" + if os.path.isfile(filename): + return 1 + else: + return 0 + + def load_precompiled(self, file): + """ Load precompiled template from disk. 
+ + Remove the precompiled template file and recompile it + if the file contains corrupted or unpicklable data. + + @hidden + """ + filename = file + "c" # "template.tmplc" + self.DEB("LOADING PRECOMPILED") + try: + remove_bad = 0 + file = None + try: + file = open(filename, "rb") + self.lock_file(file, LOCK_SH) + precompiled = cPickle.load(file) + except IOError, (errno, errstr): + raise TemplateError, "IO error in load precompiled "\ + "template '%s': (%d) %s"\ + % (filename, errno, errstr) + except cPickle.UnpicklingError: + remove_bad = 1 + raise PrecompiledError, filename + except: + remove_bad = 1 + raise + else: + return precompiled + finally: + if file: + self.lock_file(file, LOCK_UN) + file.close() + if remove_bad and os.path.isfile(filename): + # X: We may lose the original exception here, raising OSError. + os.remove(filename) + + def save_precompiled(self, template): + """ Save compiled template to disk in precompiled form. + + Associated metadata is also saved. It includes: filename of the + main template file, modification time of the main template file, + modification times of all included templates and version of the + htmltmpl module which compiled the template. + + The method removes a file which is saved only partially because + of some error. + + @hidden + """ + filename = template.file() + "c" # creates "template.tmplc" + # Check if we have write permission to the template's directory. + template_dir = os.path.dirname(os.path.abspath(filename)) + if not os.access(template_dir, os.W_OK): + raise TemplateError, "Cannot save precompiled templates "\ + "to '%s': write permission denied."\ + % template_dir + try: + remove_bad = 0 + file = None + try: + file = open(filename, "wb") # may truncate existing file + self.lock_file(file, LOCK_EX) + BINARY = 1 + READABLE = 0 + if self._debug: + cPickle.dump(template, file, READABLE) + else: + cPickle.dump(template, file, BINARY) + except IOError, (errno, errstr): + remove_bad = 1 + raise TemplateError, "IO error while saving precompiled "\ + "template '%s': (%d) %s"\ + % (filename, errno, errstr) + except cPickle.PicklingError, error: + remove_bad = 1 + raise TemplateError, "Pickling error while saving "\ + "precompiled template '%s': %s"\ + % (filename, error) + except: + remove_bad = 1 + raise + else: + self.DEB("SAVING PRECOMPILED") + finally: + if file: + self.lock_file(file, LOCK_UN) + file.close() + if remove_bad and os.path.isfile(filename): + # X: We may lose the original exception here, raising OSError. + os.remove(filename) + + +############################################## +# CLASS: TemplateProcessor # +############################################## + +class TemplateProcessor: + """ Fill the template with data and process it. + + This class provides actual processing of a compiled template. + Use it to set template variables and loops and then obtain + result of the processing. + """ + + def __init__(self, html_escape=1, magic_vars=1, global_vars=0, debug=0): + """ Constructor. + + @header __init__(html_escape=1, magic_vars=1, global_vars=0, + debug=0) + + @param html_escape Enable or disable HTML escaping of variables. + This optional parameter is a flag that can be used to enable or + disable automatic HTML escaping of variables. + All variables are by default automatically HTML escaped. + The escaping process substitutes HTML brackets, ampersands and + double quotes with appropriate HTML entities. + + @param magic_vars Enable or disable loop magic variables. 
+ This parameter can be used to enable or disable + "magic" context variables, that are automatically defined inside + loops. Magic variables are enabled by default. + + Refer to the language specification for description of these + magic variables. + + @param global_vars Globally activate global lookup of variables. + This optional parameter is a flag that can be used to specify + whether variables which cannot be found in the current scope + should be automatically looked up in enclosing scopes. + + Automatic global lookup is disabled by default. Global lookup + can be overriden on a per-variable basis by the + GLOBAL parameter of a TMPL_VAR + statement. + + @param debug Enable or disable debugging messages. + """ + self._html_escape = html_escape + self._magic_vars = magic_vars + self._global_vars = global_vars + self._debug = debug + + # Data structure containing variables and loops set by the + # application. Use debug=1, process some template and + # then check stderr to see how the structure looks. + # It's modified only by set() and reset() methods. + self._vars = {} + + # Following variables are for multipart templates. + self._current_part = 1 + self._current_pos = 0 + + def set(self, var, value): + """ Associate a value with top-level template variable or loop. + + A template identifier can represent either an ordinary variable + (string) or a loop. + + To assign a value to a string identifier pass a scalar + as the 'value' parameter. This scalar will be automatically + converted to string. + + To assign a value to a loop identifier pass a list of mappings as + the 'value' parameter. The engine iterates over this list and + assigns values from the mappings to variables in a template loop + block if a key in the mapping corresponds to a name of a variable + in the loop block. The number of mappings contained in this list + is equal to number of times the loop block is repeated in the + output. + + @header set(var, value) + @return No return value. + + @param var Name of template variable or loop. + @param value The value to associate. + + """ + # The correctness of character case is verified only for top-level + # variables. + if self.is_ordinary_var(value): + # template top-level ordinary variable + if not var.islower(): + raise TemplateError, "Invalid variable name '%s'." % var + elif type(value) == ListType: + # template top-level loop + if var != var.capitalize(): + raise TemplateError, "Invalid loop name '%s'." % var + else: + raise TemplateError, "Value of toplevel variable '%s' must "\ + "be either a scalar or a list." % var + self._vars[var] = value + self.DEB("VALUE SET: " + str(var)) + + def reset(self, keep_data=0): + """ Reset the template data. + + This method resets the data contained in the template processor + instance. The template processor instance can be used to process + any number of templates, but this method must be called after + a template is processed to reuse the instance, + + @header reset(keep_data=0) + @return No return value. + + @param keep_data Do not reset the template data. + Use this flag if you do not want the template data to be erased. + This way you can reuse the data contained in the instance of + the TemplateProcessor. + """ + self._current_part = 1 + self._current_pos = 0 + if not keep_data: + self._vars.clear() + self.DEB("RESET") + + def process(self, template, part=None): + """ Process a compiled template. Return the result as string. + + This method actually processes a template and returns + the result. 
+
+        @header process(template, part=None)
+        @return Result of the processing as string.
+
+        @param template A compiled template.
+        Value of this parameter must be an instance of the
+        Template class created either by the
+        TemplateManager or by the TemplateCompiler.
+
+        @param part The part of a multipart template to process.
+        This parameter can be used only together with a multipart
+        template. It specifies the number of the part to process.
+        It must be greater than zero, because the parts are numbered
+        from one.
+
+        The parts must be processed in the right order. You
+        cannot process a part which precedes an already processed part.
+
+        If this parameter is not specified, then the whole template
+        is processed, or all remaining parts are processed.
+        """
+        self.DEB("APP INPUT:")
+        if self._debug: pprint.pprint(self._vars, sys.stderr)
+        if part != None and (part == 0 or part < self._current_part):
+            raise TemplateError, "process() - invalid part number"
+
+        # This flag means "jump behind the end of current statement" or
+        # "skip the parameters of current statement".
+        # Even parameters that actually are not present in the template
+        # do appear in the list of tokens as empty items !
+        skip_params = 0
+
+        # Stack for enabling or disabling output in response to TMPL_IF,
+        # TMPL_UNLESS, TMPL_ELSE and TMPL_LOOPs with no passes.
+        output_control = []
+        ENABLE_OUTPUT = 1
+        DISABLE_OUTPUT = 0
+
+        # Stacks for data related to loops.
+        loop_name = []        # name of a loop
+        loop_pass = []        # current pass of a loop (counted from zero)
+        loop_start = []       # index of loop start in token list
+        loop_total = []       # total number of passes in a loop
+
+        tokens = template.tokens()
+        len_tokens = len(tokens)
+        out = ""              # buffer for processed output
+
+        # Recover position at which we ended after processing of last part.
+        i = self._current_pos
+
+        # Process the list of tokens.
+        while 1:
+            if i == len_tokens: break
+            if skip_params:
+                # Skip the parameters following a statement.
+                skip_params = 0
+                i += PARAMS_NUMBER
+                continue
+
+            token = tokens[i]
+            if token.startswith("<TMPL_VAR"):
+                # TMPL_VARs should be first. They are the most common.
+                var = tokens[i + PARAM_NAME]
+                if not var:
+                    raise TemplateError, "No identifier in <TMPL_VAR>."
+                escape = tokens[i + PARAM_ESCAPE]
+                globalp = tokens[i + PARAM_GLOBAL]
+                skip_params = 1
+
+                # If output of current block is not disabled then append
+                # the substituted and escaped variable to the output.
+                if DISABLE_OUTPUT not in output_control:
+                    value = str(self.find_value(var, loop_name, loop_pass,
+                                                loop_total, globalp))
+                    out += self.escape(value, escape)
+                    self.DEB("VAR: " + str(var))
+
+            elif token == "<TMPL_LOOP":
+                var = tokens[i + PARAM_NAME]
+                if not var:
+                    raise TemplateError, "No identifier in <TMPL_LOOP>."
+                skip_params = 1
+
+                # Find total number of passes in this loop.
+                passtotal = self.find_value(var, loop_name, loop_pass,
+                                            loop_total)
+                if not passtotal: passtotal = 0
+                # Push data for this loop on the stack.
+                loop_total.append(passtotal)
+                loop_start.append(i)
+                loop_pass.append(0)
+                loop_name.append(var)
+
+                # Disable output of loop block if the number of passes
+                # in this loop is zero.
+                if passtotal == 0:
+                    # This loop is empty.
+                    output_control.append(DISABLE_OUTPUT)
+                    self.DEB("LOOP: DISABLE: " + str(var))
+                else:
+                    output_control.append(ENABLE_OUTPUT)
+                    self.DEB("LOOP: FIRST PASS: %s TOTAL: %d"\
+                             % (var, passtotal))
+
+            elif token == "<TMPL_IF":
+                var = tokens[i + PARAM_NAME]
+                if not var:
+                    raise TemplateError, "No identifier in <TMPL_IF>."
+                globalp = tokens[i + PARAM_GLOBAL]
+                skip_params = 1
+                if self.find_value(var, loop_name, loop_pass,
+                                   loop_total, globalp):
+                    output_control.append(ENABLE_OUTPUT)
+                    self.DEB("IF: ENABLE: " + str(var))
+                else:
+                    output_control.append(DISABLE_OUTPUT)
+                    self.DEB("IF: DISABLE: " + str(var))
+
+            elif token == "<TMPL_UNLESS":
+                var = tokens[i + PARAM_NAME]
+                if not var:
+                    raise TemplateError, "No identifier in <TMPL_UNLESS>."
+                globalp = tokens[i + PARAM_GLOBAL]
+                skip_params = 1
+                if self.find_value(var, loop_name, loop_pass,
+                                   loop_total, globalp):
+                    output_control.append(DISABLE_OUTPUT)
+                    self.DEB("UNLESS: DISABLE: " + str(var))
+                else:
+                    output_control.append(ENABLE_OUTPUT)
+                    self.DEB("UNLESS: ENABLE: " + str(var))
+
+            elif token == "</TMPL_LOOP":
+                skip_params = 1
+                if not loop_name:
+                    raise TemplateError, "Unmatched </TMPL_LOOP>."
+
+                # If this loop was not disabled, then record the pass.
+                if loop_total[-1] > 0: loop_pass[-1] += 1
+
+                if loop_pass[-1] == loop_total[-1]:
+                    # There are no more passes in this loop. Pop
+                    # the loop from stack.
+                    loop_pass.pop()
+                    loop_name.pop()
+                    loop_start.pop()
+                    loop_total.pop()
+                    output_control.pop()
+                    self.DEB("LOOP: END")
+                else:
+                    # Jump to the beginning of this loop block
+                    # to process next pass of the loop.
+                    i = loop_start[-1]
+                    self.DEB("LOOP: NEXT PASS")
+
+            elif token == "</TMPL_IF":
+                skip_params = 1
+                if not output_control:
+                    raise TemplateError, "Unmatched </TMPL_IF>."
+                output_control.pop()
+                self.DEB("IF: END")
+
+            elif token == "</TMPL_UNLESS":
+                skip_params = 1
+                if not output_control:
+                    raise TemplateError, "Unmatched </TMPL_UNLESS>."
+                output_control.pop()
+                self.DEB("UNLESS: END")
+
+            elif token == "<TMPL_ELSE":
+                skip_params = 1
+                if not output_control:
+                    raise TemplateError, "Unmatched <TMPL_ELSE>."
+                if output_control[-1] == DISABLE_OUTPUT:
+                    # Condition was false, activate the ELSE block.
+                    output_control[-1] = ENABLE_OUTPUT
+                    self.DEB("ELSE: ENABLE")
+                elif output_control[-1] == ENABLE_OUTPUT:
+                    # Condition was true, deactivate the ELSE block.
+                    output_control[-1] = DISABLE_OUTPUT
+                    self.DEB("ELSE: DISABLE")
+                else:
+                    raise TemplateError, "BUG: ELSE: INVALID FLAG"
+
+            elif token == "<TMPL_BOUNDARY":
+                if part and part == self._current_part:
+                    self.DEB("BOUNDARY ON")
+                    self._current_part += 1
+                    self._current_pos = i + 1 + PARAMS_NUMBER
+                    break
+                else:
+                    skip_params = 1
+                    self.DEB("BOUNDARY OFF")
+                    self._current_part += 1
+
+            elif token == "<TMPL_INCLUDE":
+                # TMPL_INCLUDE is left in the compiled template only
+                # when the parser could not replace it with the tokens
+                # of the included template.
+                skip_params = 1
+                filename = tokens[i + PARAM_NAME]
+                out += """
+                    <br />
+                    <p>
+                    <strong>HTMLTMPL WARNING:</strong><br />
+                    Cannot include template: <strong>%s</strong>
+                    </p>
+                    <br />
+                """ % filename
+                self.DEB("CANNOT INCLUDE WARNING")
+
+            elif token == "<TMPL_GETTEXT":
+                skip_params = 1
+                if DISABLE_OUTPUT not in output_control:
+                    text = tokens[i + PARAM_GETTEXT_STRING]
+                    out += gettext.gettext(text)
+                    self.DEB("GETTEXT: " + text)
+
+            elif token.startswith("<TMPL_"):
+                # Unknown processing directive.
+                raise TemplateError, "Invalid statement %s." % token
+
+            elif DISABLE_OUTPUT not in output_control:
+                # Raw textual template data.
+                # If output of current block is not disabled, then
+                # append template data to the output buffer.
+                out += token
+
+            i += 1
+            # end of the big while loop
+
+        # Check whether all opening statements were closed.
+        if loop_name: raise TemplateError, "Missing </TMPL_LOOP>."
+        if output_control: raise TemplateError, "Missing </TMPL_IF> or </TMPL_UNLESS>."
+        return out
+
+    ##############################################
+    #              PRIVATE METHODS               #
+    ##############################################
+
+    def DEB(self, str):
+        """ Print debugging message to stderr if debugging is enabled.
+            @hidden
+        """
+        if self._debug: print >> sys.stderr, str
+
+    def find_value(self, var, loop_name, loop_pass, loop_total,
+                   global_override=None):
+        """ Search the self._vars data structure for the variable var
+            in the currently processed pass of the loop which is
+            currently being processed. If the variable is an ordinary
+            variable, then return it.
+
+            If the variable is an identifier of a loop, then
+            return the total number of times this loop will
+            be executed.
+
+            Return an empty string, if the variable is not
+            found at all.
+
+            @hidden
+        """
+        # Search for the requested variable in magic vars if the name
+        # of the variable starts with "__" and if we are inside a loop.
+        if self._magic_vars and var.startswith("__") and loop_name:
+            return self.magic_var(var, loop_pass[-1], loop_total[-1])
+
+        # Search for an ordinary variable or for a loop.
+        # Recursively search in self._vars for the requested variable.
+        scope = self._vars
+        globals = []
+        for i in range(len(loop_name)):
+            # If global lookup is on then push the value on the stack.
+            if ((self._global_vars and global_override != "0") or \
+                 global_override == "1") and scope.has_key(var) and \
+               self.is_ordinary_var(scope[var]):
+                globals.append(scope[var])
+
+            # Descend deeper into the hierarchy.
+            if scope.has_key(loop_name[i]) and scope[loop_name[i]]:
+                scope = scope[loop_name[i]][loop_pass[i]]
+            else:
+                return ""
+
+        if scope.has_key(var):
+            # Value exists in current loop.
+            if type(scope[var]) == ListType:
+                # The requested value is a loop.
+                # Return total number of its passes.
+                return len(scope[var])
+            else:
+                return scope[var]
+        elif globals and \
+             ((self._global_vars and global_override != "0") or \
+              global_override == "1"):
+            # Return globally looked up value.
+            return globals.pop()
+        else:
+            # No value found.
+            if var[0].isupper():
+                # This is a loop name.
+                # Return zero, because the user wants to know number
+                # of its passes.
+                return 0
+            else:
+                return ""
+
+    def magic_var(self, var, loop_pass, loop_total):
+        """ Resolve and return value of a magic variable.
+            Raise an exception if the magic variable is not recognized.
+
+            @hidden
+        """
+        self.DEB("MAGIC: '%s', PASS: %d, TOTAL: %d"\
+                 % (var, loop_pass, loop_total))
+        if var == "__FIRST__":
+            if loop_pass == 0:
+                return 1
+            else:
+                return 0
+        elif var == "__LAST__":
+            if loop_pass == loop_total - 1:
+                return 1
+            else:
+                return 0
+        elif var == "__INNER__":
+            # If this is neither the first nor the last pass.
+            if loop_pass != 0 and loop_pass != loop_total - 1:
+                return 1
+            else:
+                return 0
+        elif var == "__PASS__":
+            # Magic variable __PASS__ counts passes from one.
+            return loop_pass + 1
+        elif var == "__PASSTOTAL__":
+            return loop_total
+        elif var == "__ODD__":
+            # Internally pass numbers stored in loop_pass are counted from
+            # zero. But the template language presents them counted from one.
+            # Therefore we must add one to the actual loop_pass value to get
+            # the value we present to the user.
+            if (loop_pass + 1) % 2 != 0:
+                return 1
+            else:
+                return 0
+        elif var.startswith("__EVERY__"):
+            # Magic variable __EVERY__x is never true in first or last pass.
+            if loop_pass != 0 and loop_pass != loop_total - 1:
+                # Check if an integer follows the variable name.
+                try:
+                    every = int(var[9:])   # nine is length of "__EVERY__"
+                except ValueError:
+                    raise TemplateError, "Magic variable __EVERY__x: "\
+                                         "Invalid pass number."
+                else:
+                    if not every:
+                        raise TemplateError, "Magic variable __EVERY__x: "\
+                                             "Pass number cannot be zero."
+                    elif (loop_pass + 1) % every == 0:
+                        self.DEB("MAGIC: EVERY: " + str(every))
+                        return 1
+                    else:
+                        return 0
+            else:
+                return 0
+        else:
+            raise TemplateError, "Invalid magic variable '%s'." % var
+
+    def escape(self, str, override=""):
+        """ Escape a string either by HTML escaping or by URL escaping.
+            @hidden
+        """
+        ESCAPE_QUOTES = 1
+        if (self._html_escape and override != "NONE" and override != "0" and \
+            override != "URL") or override == "HTML" or override == "1":
+            return cgi.escape(str, ESCAPE_QUOTES)
+        elif override == "URL":
+            return urllib.quote_plus(str)
+        else:
+            return str
+
+    def is_ordinary_var(self, var):
+        """ Return true if var is a scalar (not a reference to a loop).
+            @hidden
+        """
+        if type(var) == StringType or type(var) == IntType or \
+           type(var) == LongType or type(var) == FloatType:
+            return 1
+        else:
+            return 0
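Taken together, set(), process() and the magic variables are easiest to see in a short end-to-end sketch. Everything below is illustrative: the file name greeting.tmpl, its contents and the variable names are invented for the example, and the module is assumed to be importable as htmltmpl.

    # A minimal usage sketch. Assumes a file "greeting.tmpl" containing:
    #
    #   Hello, <TMPL_VAR name>!
    #   <TMPL_LOOP Items>
    #     <TMPL_VAR __PASS__>/<TMPL_VAR __PASSTOTAL__>: <TMPL_VAR title>
    #   </TMPL_LOOP>
    #
    from htmltmpl import TemplateManager, TemplateProcessor

    template = TemplateManager().prepare("greeting.tmpl")
    tproc = TemplateProcessor()
    tproc.set("name", "World")                 # ordinary variables are lowercase
    tproc.set("Items", [{"title": "first"},    # loops are capitalized and take
                        {"title": "second"}])  # a list of mappings
    print tproc.process(template)
    tproc.reset()                              # required before reusing tproc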
+
+##############################################
+#          CLASS: TemplateCompiler           #
+##############################################
+
+class TemplateCompiler:
+    """ Preprocess, parse, tokenize and compile the template.
+
+        This class parses the template and produces a 'compiled' form
+        of it. This compiled form is an instance of the Template
+        class. The compiled form is used as input for the TemplateProcessor
+        which uses it to actually process the template.
+
+        This class should be used directly only when you need to compile
+        a template from a string. If your template is in a file, then you
+        should use the TemplateManager class which provides
+        a higher level interface to this class and also can save the
+        compiled template to disk in a precompiled form.
+    """
+
+    def __init__(self, include=1, max_include=5, comments=1, gettext=0,
+                 debug=0):
+        """ Constructor.
+
+        @header __init__(include=1, max_include=5, comments=1, gettext=0,
+                         debug=0)
+
+        @param include Enable or disable included templates.
+        @param max_include Maximum depth of nested inclusions.
+        @param comments Enable or disable template comments.
+        @param gettext Enable or disable gettext support.
+        @param debug Enable or disable debugging messages.
+        """
+
+        self._include = include
+        self._max_include = max_include
+        self._comments = comments
+        self._gettext = gettext
+        self._debug = debug
+
+        # This is a list of filenames of all included templates.
+        # It's modified by the include_templates() method.
+        self._include_files = []
+
+        # This is a counter of current inclusion depth. It's used to prevent
+        # infinite recursive includes.
+        self._include_level = 0
+
+    def compile(self, file):
+        """ Compile template from a file.
+
+        @header compile(file)
+        @return Compiled template.
+        The return value is an instance of the Template
+        class.
+
+        @param file Filename of the template.
+        See the prepare() method of the TemplateManager
+        class for explanation of this parameter.
+        """
+        self.DEB("COMPILING FROM FILE: " + file)
+        self._include_path = os.path.join(os.path.dirname(file), INCLUDE_DIR)
+        tokens = self.parse(self.read(file))
+        compile_params = (self._include, self._max_include, self._comments,
+                          self._gettext)
+        return Template(__version__, file, self._include_files,
+                        tokens, compile_params, self._debug)
+
+    def compile_string(self, data):
+        """ Compile template from a string.
+
+        This method compiles a template from a string. The
+        template cannot include any templates.
+        TMPL_INCLUDE statements are turned into warnings.
+
+        @header compile_string(data)
+        @return Compiled template.
+        The return value is an instance of the Template
+        class.
+
+        @param data String containing the template data.
+        """
+        self.DEB("COMPILING FROM STRING")
+        self._include = 0
+        tokens = self.parse(data)
+        compile_params = (self._include, self._max_include, self._comments,
+                          self._gettext)
+        return Template(__version__, None, None, tokens, compile_params,
+                        self._debug)
+
+    ##############################################
+    #              PRIVATE METHODS               #
+    ##############################################
+
+    def DEB(self, str):
+        """ Print debugging message to stderr if debugging is enabled.
+            @hidden
+        """
+        if self._debug: print >> sys.stderr, str
+
+    def read(self, filename):
+        """ Read content of file and return it. Raise an error if a problem
+            occurs.
+            @hidden
+        """
+        self.DEB("READING: " + filename)
+        try:
+            f = None
+            try:
+                f = open(filename, "r")
+                data = f.read()
+            except IOError, (errno, errstr):
+                raise TemplateError, "IO error while reading template '%s': "\
+                                     "(%d) %s" % (filename, errno, errstr)
+            else:
+                return data
+        finally:
+            if f: f.close()
+
+    def parse(self, template_data):
+        """ Parse the template. This method is recursively called from
+            within the include_templates() method.
+
+            @return List of processing tokens.
+            @hidden
+        """
+        if self._comments:
+            self.DEB("PREPROCESS: COMMENTS")
+            template_data = self.remove_comments(template_data)
+        tokens = self.tokenize(template_data)
+        if self._include:
+            self.DEB("PREPROCESS: INCLUDES")
+            self.include_templates(tokens)
+        return tokens
+
+    def remove_comments(self, template_data):
+        """ Remove comments from the template data.
+            @hidden
+        """
+        pattern = r"### .*"
+        return re.sub(pattern, "", template_data)
+
+    def include_templates(self, tokens):
+        """ Process TMPL_INCLUDE statements. Use the include_level counter
+            to prevent infinite recursion. Record paths to all included
+            templates to self._include_files.
+            @hidden
+        """
+        i = 0
+        out = ""    # buffer for output
+        skip_params = 0
+
+        # Process the list of tokens.
+        while 1:
+            if i == len(tokens): break
+            if skip_params:
+                skip_params = 0
+                i += PARAMS_NUMBER
+                continue
+
+            token = tokens[i]
+            if token == "<TMPL_INCLUDE":
+                filename = tokens[i + PARAM_NAME]
+                if not filename:
+                    raise TemplateError, "No filename in <TMPL_INCLUDE>."
+                self._include_level += 1
+                if self._include_level > self._max_include:
+                    # Do not include the template.
+                    # Protection against infinite recursive includes.
+                    skip_params = 1
+                    self.DEB("INCLUDE: LIMIT REACHED: " + filename)
+                else:
+                    # Include the template.
+                    skip_params = 0
+                    include_file = os.path.join(self._include_path, filename)
+                    self._include_files.append(include_file)
+                    include_data = self.read(include_file)
+                    include_tokens = self.parse(include_data)
+
+                    # Append the tokens from the included template to actual
+                    # position in the tokens list, replacing the TMPL_INCLUDE
+                    # token and its parameters.
+                    tokens[i:i+PARAMS_NUMBER+1] = include_tokens
+                    i = i + len(include_tokens)
+                    self.DEB("INCLUDED: " + filename)
+                continue    # Do not increment 'i' below.
+            i += 1
+            # end of the main while loop
+
+        if self._include_level > 0: self._include_level -= 1
+        return out
+
+    def tokenize(self, template_data):
+        """ Split the template into tokens separated by template statements.
+            The statements themselves and associated parameters are also
+            separately included in the resulting list of tokens.
+            Return list of the tokens.
+
+            @hidden
+        """
+        self.DEB("TOKENIZING TEMPLATE")
+        # NOTE: The TWO double quotes in character class in the regexp below
+        # are there only to prevent confusion of syntax highlighter in Emacs.
+        pattern = r"""
+            (?:^[ \t]+)?               # eat spaces, tabs (opt.)
+            (<
+             (?:!--[ ])?               # comment start + space (opt.)
+             /?TMPL_[A-Z]+             # closing slash / (opt.) + statement
+             [ a-zA-Z0-9""/.=:_\\-]*   # this spans also comments ending (--)
+             >)
+            [%s]?                      # eat trailing newline (opt.)
+        """ % os.linesep
+        rc = re.compile(pattern, re.VERBOSE | re.MULTILINE)
+        split = rc.split(template_data)
+        tokens = []
+        for statement in split:
+            if statement.startswith("<TMPL_") or \
+               statement.startswith("<!-- TMPL_") or \
+               statement.startswith("</TMPL_") or \
+               statement.startswith("<!-- /TMPL_"):
+                # Processing statement.
+                statement = self.strip_brackets(statement)
+                params = re.split(r"\s+", statement)
+                tokens.append(self.find_directive(params))
+                tokens.append(self.find_name(params))
+                tokens.append(self.find_param("ESCAPE", params))
+                tokens.append(self.find_param("GLOBAL", params))
+            else:
+                # "Normal" template data.
+                if self._gettext:
+                    self.DEB("PARSING TEXT AROUND GETTEXT STATEMENTS")
+                    self.gettext_tokens(tokens, statement)
+                else:
+                    tokens.append(statement)
+        return tokens
+
+    def gettext_tokens(self, tokens, statement):
+        """ Split plain template data into normal data and gettext
+            strings. Gettext strings are enclosed in [[ and ]] and the
+            brackets can be escaped with a backslash.
+            @hidden
+        """
+        buf = ""
+        gt_mode = 0
+        i = 0
+        while i < len(statement):
+            if statement[i] == "\\" and i + 1 < len(statement) and \
+               statement[i + 1] in ("[", "]", "\\"):
+                # Escaped bracket or backslash: emit it literally.
+                buf += statement[i + 1]
+                i += 2
+            elif not gt_mode and statement[i:i + 2] == "[[":
+                # Start of a gettext string: flush normal data.
+                tokens.append(buf)
+                buf = ""
+                gt_mode = 1
+                i += 2
+            elif gt_mode and statement[i:i + 2] == "]]":
+                # End of a gettext string.
+                self.add_gettext_token(tokens, buf)
+                buf = ""
+                gt_mode = 0
+                i += 2
+            else:
+                buf += statement[i]
+                i += 1
+        if buf:
+            tokens.append(buf)
+
+    def add_gettext_token(self, tokens, str):
+        """ Append a gettext token and the gettext string to the tokens
+            array. Pad with empty parameters so that the statement has
+            PARAMS_NUMBER parameters like any other statement.
+            @hidden
+        """
+        self.DEB("TOKENIZER: GETTEXT: " + str)
+        tokens.append("<TMPL_GETTEXT")
+        tokens.append(str)
+        tokens.append(None)
+        tokens.append(None)
+
+    def strip_brackets(self, statement):
+        """ Strip HTML brackets (with optional comments) from the
+            beginning and from the end of a statement.
+            @hidden
+        """
+        if statement.startswith("<!-- TMPL_") or \
+           statement.startswith("<!-- /TMPL_"):
+            return statement[5:-4]
+        else:
+            return statement[1:-1]
+
+    def find_directive(self, params):
+        """ Extract the processing directive (TMPL_*) from a statement.
+            @hidden
+        """
+        directive = params[0]
+        del params[0]
+        self.DEB("TOKENIZER: DIRECTIVE: " + directive)
+        return "<" + directive
+
+    def find_name(self, params):
+        """ Find and return a name of a statement.
+            @hidden
+        """
+        if len(params) > 0 and '=' not in params[0]:
+            # implicit identifier
+            name = params[0]
+            del params[0]
+        else:
+            # explicit identifier as a 'NAME' parameter
+            name = self.find_param("NAME", params)
+        self.DEB("TOKENIZER: NAME: " + str(name))
+        return name
+
+    def find_param(self, param, params):
+        """ Extract value of parameter from a statement.
+            @hidden
+        """
+        for pair in params:
+            name, value = pair.split("=")
+            if not name or not value:
+                raise TemplateError, "Syntax error in template."
+            if name == param:
+                if value[0] == '"':
+                    # The value is in double quotes.
+                    ret_value = value[1:-1]
+                else:
+                    # The value is without double quotes.
+                    ret_value = value
+                self.DEB("TOKENIZER: PARAM: '%s' => '%s'" % (param, ret_value))
+                return ret_value
+        else:
+            self.DEB("TOKENIZER: PARAM: '%s' => NOT DEFINED" % param)
+            return None
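Compiling straight from a string exercises the tokenizer above without touching the filesystem. A small sketch (variable names illustrative; the module is assumed to be importable as htmltmpl):

    from htmltmpl import TemplateCompiler, TemplateProcessor

    # compile_string() disables TMPL_INCLUDE, so the source must be
    # self-contained.
    template = TemplateCompiler().compile_string("Welcome, <TMPL_VAR user>!")
    tproc = TemplateProcessor()
    tproc.set("user", "djagen")
    print tproc.process(template)   # prints: Welcome, djagen!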
+
+##############################################
+#              CLASS: Template               #
+##############################################
+
+class Template:
+    """ This class represents a compiled template.
+
+        This class provides storage and methods for the compiled template
+        and associated metadata. It's serialized by pickle if we need to
+        save the compiled template to disk in a precompiled form.
+
+        You should never instantiate this class directly. Always use the
+        TemplateManager or TemplateCompiler classes to
+        create the instances of this class.
+
+        The only method which you can directly use is the is_uptodate
+        method.
+    """
+
+    def __init__(self, version, file, include_files, tokens, compile_params,
+                 debug=0):
+        """ Constructor.
+            @hidden
+        """
+        self._version = version
+        self._file = file
+        self._tokens = tokens
+        self._compile_params = compile_params
+        self._debug = debug
+        self._mtime = None
+        self._include_mtimes = {}
+
+        if not file:
+            self.DEB("TEMPLATE WAS COMPILED FROM A STRING")
+            return
+
+        # Save modification time of the main template file.
+        if os.path.isfile(file):
+            self._mtime = os.path.getmtime(file)
+        else:
+            raise TemplateError, "Template: file does not exist: '%s'" % file
+
+        # Save modification times of all included template files.
+        for inc_file in include_files:
+            if os.path.isfile(inc_file):
+                self._include_mtimes[inc_file] = os.path.getmtime(inc_file)
+            else:
+                raise TemplateError, "Template: file does not exist: '%s'"\
+                                     % inc_file
+
+        self.DEB("NEW TEMPLATE CREATED")
+
+    def is_uptodate(self, compile_params=None):
+        """ Check whether the compiled template is uptodate.
+
+            Return true if this compiled template is uptodate.
+            Return false, if the template source file was changed on the
+            disk since it was compiled.
+            Works by comparison of modification times.
+            Also takes modification times of all included templates
+            into account.
+
+            @header is_uptodate(compile_params=None)
+            @return True if the template is uptodate, false otherwise.
+
+            @param compile_params Only for internal use.
+            Do not use this optional parameter. It's intended only for
+            internal use by the TemplateManager.
+        """
+        if not self._file:
+            self.DEB("TEMPLATE COMPILED FROM A STRING")
+            return 0
+
+        if self._version != __version__:
+            self.DEB("TEMPLATE: VERSION NOT UPTODATE")
+            return 0
+
+        if compile_params != None and compile_params != self._compile_params:
+            self.DEB("TEMPLATE: DIFFERENT COMPILATION PARAMS")
+            return 0
+
+        # Check modification times of the main template and all included
+        # templates. If an included template no longer exists, then
+        # the problem will be resolved when the template is recompiled.
+
+        # Main template file.
+        if not (os.path.isfile(self._file) and \
+                self._mtime == os.path.getmtime(self._file)):
+            self.DEB("TEMPLATE: NOT UPTODATE: " + self._file)
+            return 0
+
+        # Included templates.
+        for inc_file in self._include_mtimes.keys():
+            if not (os.path.isfile(inc_file) and \
+                    self._include_mtimes[inc_file] == \
+                    os.path.getmtime(inc_file)):
+                self.DEB("TEMPLATE: NOT UPTODATE: " + inc_file)
+                return 0
+        else:
+            self.DEB("TEMPLATE: UPTODATE")
+            return 1
+
+    def tokens(self):
+        """ Get tokens of this template.
+            @hidden
+        """
+        return self._tokens
+
+    def file(self):
+        """ Get filename of the main file of this template.
+            @hidden
+        """
+        return self._file
+
+    def debug(self, debug):
+        """ Set debugging state.
+            @hidden
+        """
+        self._debug = debug
+
+    ##############################################
+    #              PRIVATE METHODS               #
+    ##############################################
+
+    def __getstate__(self):
+        """ Used by pickle when the class is serialized.
+            Remove the 'debug' attribute before serialization.
+            @hidden
+        """
+        dict = copy.copy(self.__dict__)
+        del dict["_debug"]
+        return dict
+
+    def __setstate__(self, dict):
+        """ Used by pickle when the class is unserialized.
+            Add the 'debug' attribute.
+            @hidden
+        """
+        dict["_debug"] = 0
+        self.__dict__ = dict
+
+    def DEB(self, str):
+        """ Print debugging message to stderr.
+            @hidden
+        """
+        if self._debug: print >> sys.stderr, str
+
+
+##############################################
+#                EXCEPTIONS                  #
+##############################################
+
+class TemplateError(Exception):
+    """ Fatal exception. Raised on runtime or template syntax errors.
+
+        This exception is raised when a runtime error occurs or when a syntax
+        error in the template is found. It has one parameter which always
+        is a string containing a description of the error.
+
+        All potential IOError exceptions are handled by the module and are
+        converted to TemplateError exceptions. That means you should catch the
+        TemplateError exception if there is a possibility that for example
+        the template file will not be accessible.
+
+        The exception can be raised by constructors or by any method of any
+        class.
+
+        The instance is no longer usable when this exception is raised.
+    """
+
+    def __init__(self, error):
+        """ Constructor.
+            @hidden
+        """
+        Exception.__init__(self, "Htmltmpl error: " + error)
+
+
+class PrecompiledError(Exception):
+    """ This exception is _PRIVATE_ and non-fatal.
+        @hidden
+    """
+
+    def __init__(self, template):
+        """ Constructor.
+            @hidden
+        """
+        Exception.__init__(self, template)
+
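The precompilation path earlier in this file (load_precompiled()/save_precompiled(), guarded by is_uptodate()) is what makes repeated renders cheap: the compiled token list is pickled next to the source as "template.tmplc" and reused until the source or an included file changes. A hedged sketch; the precompile keyword of TemplateManager is assumed from its constructor earlier in this file:

    from htmltmpl import TemplateManager

    # First run: parses "page.tmpl" and pickles the result to "page.tmplc".
    template = TemplateManager(precompile=1).prepare("page.tmpl")

    # Later runs: load_precompiled() unpickles "page.tmplc" and skips parsing
    # entirely, as long as is_uptodate() confirms that neither the template
    # nor any included template changed on disk.
    template = TemplateManager(precompile=1).prepare("page.tmpl")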
diff --git a/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet/sanitize.py b/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet/sanitize.py new file mode 100755 index 0000000..c98b14d --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet/sanitize.py @@ -0,0 +1,354 @@
+"""
+sanitize: bringing sanity to the world of messed-up data
+"""
+
+__author__ = ["Mark Pilgrim ",
+              "Aaron Swartz "]
+__contributors__ = ["Sam Ruby "]
+__license__ = "BSD"
+__version__ = "0.25"
+
+_debug = 0
+
+# If you want sanitize to automatically run HTML markup through HTML Tidy, set
+# this to 1. Requires mxTidy
+# or utidylib .
+TIDY_MARKUP = 0
+
+# List of Python interfaces for HTML Tidy, in order of preference. Only useful
+# if TIDY_MARKUP = 1
+PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"]
+
+import sgmllib, re, sys
+
+# chardet library auto-detects character encodings
+# Download from http://chardet.feedparser.org/
+try:
+    import chardet
+    if _debug:
+        import chardet.constants
+        chardet.constants._debug = 1
+
+    _chardet = lambda data: chardet.detect(data)['encoding']
+except:
+    chardet = None
+    _chardet = lambda data: None
+
+class _BaseHTMLProcessor(sgmllib.SGMLParser):
+    elements_no_end_tag = ['area', 'base', 'basefont', 'br', 'col', 'frame', 'hr',
+        'img', 'input', 'isindex', 'link', 'meta', 'param']
+
+    _r_barebang = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE)
+    _r_bareamp = re.compile("&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)")
+    _r_shorttag = re.compile(r'<([^<\s]+?)\s*/>')
+
+    def __init__(self, encoding):
+        self.encoding = encoding
+        if _debug: sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding)
+        sgmllib.SGMLParser.__init__(self)
+
+    def reset(self):
+        self.pieces = []
+        sgmllib.SGMLParser.reset(self)
+
+    def _shorttag_replace(self, match):
+        tag = match.group(1)
+        if tag in self.elements_no_end_tag:
+            return '<' + tag + ' />'
+        else:
+            return '<' + tag + '>'
+
+    def feed(self, data):
+        data = self._r_barebang.sub(r'<!\1', data)
+        data = self._r_bareamp.sub("&amp;", data)
+        data = self._r_shorttag.sub(self._shorttag_replace, data)
+        if self.encoding and type(data) == type(u''):
+            data = data.encode(self.encoding)
+        sgmllib.SGMLParser.feed(self, data)
+
+    def normalize_attrs(self, attrs):
+        # utility method to be called by descendants
+        attrs = [(k.lower(), v) for k, v in attrs]
+        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
+        return attrs
+
+    def unknown_starttag(self, tag, attrs):
+        # called for each start tag
+        # attrs is a list of (attr, value) tuples
+        # e.g. for <pre class="screen">, tag='pre', attrs=[('class', 'screen')]
+        if _debug: sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag)
+        uattrs = []
+        # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
+        for key, value in attrs:
+            if type(value) != type(u''):
+                value = unicode(value, self.encoding)
+            uattrs.append((unicode(key, self.encoding), value))
+        strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs]).encode(self.encoding)
+        if tag in self.elements_no_end_tag:
+            self.pieces.append('<%(tag)s%(strattrs)s />' % locals())
+        else:
+            self.pieces.append('<%(tag)s%(strattrs)s>' % locals())
+
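The '%(name)s' % locals() idiom used throughout these handlers is plain name-based string interpolation. An illustrative snippet with invented values:

    # Illustrative only: how '<%(tag)s%(strattrs)s />' % locals() expands.
    tag = 'img'
    strattrs = ' src="a.png" alt="x"'
    print '<%(tag)s%(strattrs)s />' % locals()   # prints: <img src="a.png" alt="x" />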
+    def unknown_endtag(self, tag):
+        # called for each end tag, e.g. for </pre>, tag will be 'pre'
+        # Reconstruct the original end tag.
+        if tag not in self.elements_no_end_tag:
+            self.pieces.append("</%(tag)s>" % locals())
+
+    def handle_charref(self, ref):
+        # called for each character reference, e.g. for '&#160;', ref will be '160'
+        # Reconstruct the original character reference.
+        self.pieces.append('&#%(ref)s;' % locals())
+
+    def handle_entityref(self, ref):
+        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
+        # Reconstruct the original entity reference.
+        self.pieces.append('&%(ref)s;' % locals())
+
+    def handle_data(self, text):
+        # called for each block of plain text, i.e. outside of any tag and
+        # not containing any character or entity references
+        # Store the original text verbatim.
+        if _debug: sys.stderr.write('_BaseHTMLProcessor, handle_text, text=%s\n' % text)
+        self.pieces.append(text)
+
+    def handle_comment(self, text):
+        # called for each HTML comment, e.g. <!-- insert Python code here -->
+        # Reconstruct the original comment.
+        self.pieces.append('<!--%(text)s-->' % locals())
+
+    def handle_pi(self, text):
+        # called for each processing instruction, e.g. <?instruction>
+        # Reconstruct original processing instruction.
+        self.pieces.append('<?%(text)s>' % locals())
+
+    def handle_decl(self, text):
+        # called for the DOCTYPE, if present, e.g.
+        # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
+        #     "http://www.w3.org/TR/html4/loose.dtd">
+        # Reconstruct original DOCTYPE
+        self.pieces.append('<!%(text)s>' % locals())
+
+    _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
+    def _scan_name(self, i, declstartpos):
+        rawdata = self.rawdata
+        n = len(rawdata)
+        if i == n:
+            return None, -1
+        m = self._new_declname_match(rawdata, i)
+        if m:
+            s = m.group()
+            name = s.strip()
+            if (i + len(s)) == n:
+                return None, -1  # end of buffer
+            return name.lower(), m.end()
+        else:
+            self.handle_data(rawdata)
+#            self.updatepos(declstartpos, i)
+            return None, -1
+
+    def output(self):
+        '''Return processed HTML as a single string'''
+        return ''.join([str(p) for p in self.pieces])
+
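What the sanitizer below buys you, sketched through the HTML() entry point defined at the end of this module (the input string is invented; the output follows from the default whitelist):

    from planet import sanitize

    # 'script' is in ignorable_elements, so the tag and everything inside it
    # are dropped; the unclosed <b> is balanced by feed()'s final stack sweep.
    print sanitize.HTML('<script>alert(1)</script><b>safe')
    # prints: <b>safe</b>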
+class _HTMLSanitizer(_BaseHTMLProcessor):
+    acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'b', 'big',
+      'blockquote', 'br', 'button', 'caption', 'center', 'cite', 'code', 'col',
+      'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt', 'em', 'fieldset',
+      'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input',
+      'ins', 'kbd', 'label', 'legend', 'li', 'map', 'menu', 'ol', 'optgroup',
+      'option', 'p', 'pre', 'q', 's', 'samp', 'select', 'small', 'span', 'strike',
+      'strong', 'sub', 'sup', 'table', 'textarea', 'tbody', 'td', 'tfoot', 'th',
+      'thead', 'tr', 'tt', 'u', 'ul', 'var']
+
+    acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
+      'action', 'align', 'alt', 'axis', 'border', 'cellpadding', 'cellspacing',
+      'char', 'charoff', 'charset', 'checked', 'cite', 'class', 'clear', 'cols',
+      'colspan', 'color', 'compact', 'coords', 'datetime', 'dir', 'disabled',
+      'enctype', 'for', 'frame', 'headers', 'height', 'href', 'hreflang', 'hspace',
+      'id', 'ismap', 'label', 'lang', 'longdesc', 'maxlength', 'media', 'method',
+      'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'prompt', 'readonly',
+      'rel', 'rev', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size',
+      'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title', 'type',
+      'usemap', 'valign', 'value', 'vspace', 'width']
+
+    ignorable_elements = ['script', 'applet', 'style']
+
+    def reset(self):
+        _BaseHTMLProcessor.reset(self)
+        self.tag_stack = []
+        self.ignore_level = 0
+
+    def feed(self, data):
+        _BaseHTMLProcessor.feed(self, data)
+        while self.tag_stack:
+            _BaseHTMLProcessor.unknown_endtag(self, self.tag_stack.pop())
+
+    def unknown_starttag(self, tag, attrs):
+        if tag in self.ignorable_elements:
+            self.ignore_level += 1
+            return
+
+        if self.ignore_level:
+            return
+
+        if tag in self.acceptable_elements:
+            attrs = self.normalize_attrs(attrs)
+            attrs = [(key, value) for key, value in attrs if key in self.acceptable_attributes]
+            if tag not in self.elements_no_end_tag:
+                self.tag_stack.append(tag)
+            _BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
+
+    def unknown_endtag(self, tag):
+        if tag in self.ignorable_elements:
+            self.ignore_level -= 1
+            return
+
+        if self.ignore_level:
+            return
+
+        if tag in self.acceptable_elements and tag not in self.elements_no_end_tag:
+            match = False
+            while self.tag_stack:
+                top = self.tag_stack.pop()
+                if top == tag:
+                    match = True
+                    break
+                _BaseHTMLProcessor.unknown_endtag(self, top)
+
+            if match:
+                _BaseHTMLProcessor.unknown_endtag(self, tag)
+
+    def handle_pi(self, text):
+        pass
+
+    def handle_decl(self, text):
+        pass
+
+    def handle_data(self, text):
+        if not self.ignore_level:
+            text = text.replace('<', '')
+            _BaseHTMLProcessor.handle_data(self, text)
+
+def HTML(htmlSource, encoding='utf8'):
+    p = _HTMLSanitizer(encoding)
+    p.feed(htmlSource)
+    data = p.output()
+    if TIDY_MARKUP:
+        # loop through list of preferred Tidy interfaces looking for one that's installed,
+        # then set up a common _tidy function to wrap the interface-specific API.
+        _tidy = None
+        for tidy_interface in PREFERRED_TIDY_INTERFACES:
+            try:
+                if tidy_interface == "uTidy":
+                    from tidy import parseString as _utidy
+                    def _tidy(data, **kwargs):
+                        return str(_utidy(data, **kwargs))
+                    break
+                elif tidy_interface == "mxTidy":
+                    from mx.Tidy import Tidy as _mxtidy
+                    def _tidy(data, **kwargs):
+                        nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs)
+                        return data
+                    break
+            except:
+                pass
+        if _tidy:
+            utf8 = type(data) == type(u'')
+            if utf8:
+                data = data.encode('utf-8')
+            data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8")
+            if utf8:
+                data = unicode(data, 'utf-8')
+            if data.count('<body'):
+                data = data.split('<body', 1)[1]
+                if data.count('>'):
+                    data = data.split('>', 1)[1]
+            if data.count('</body'):
+                data = data.split('</body', 1)[0]
+    data = data.strip().replace('\r\n', '\n')
+    return data
diff --git a/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet/tests/test_channel.py b/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet/tests/test_channel.py new file mode 100755 --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet/tests/test_channel.py
+        self.assertEqual(self.channel.feed_information(), '<%s>' % self.url)
+
+    def test_changedurl(self):
+        # change the URL directly
+        self.channel.url = self.changed_url
+        self.assertEqual(self.channel.feed_information(),
+            "<%s> (formerly <%s>)" % (self.changed_url, self.url))
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet/tests/test_main.py b/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet/tests/test_main.py new file mode 100755 index 0000000..c2be62d --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet/tests/test_main.py @@ -0,0 +1,71 @@
+#!/usr/bin/env python
+import os, sys, shutil, errno, unittest
+from ConfigParser import ConfigParser
+from StringIO import StringIO
+import planet
+
+class MainTest(unittest.TestCase):
+
+    def test_minimal(self):
+        configp = ConfigParser()
+        my_planet = planet.Planet(configp)
+        my_planet.run("Planet Name", "http://example.com", [])
+
+    def test_onefeed(self):
+        configp = ConfigParser()
+        configp.readfp(StringIO("""[http://www.example.com/]
+name = Mary
+"""))
+        my_planet = planet.Planet(configp)
+        my_planet.run("Planet Name", "http://example.com", [], True)
+
+    def test_generateall(self):
+        configp = ConfigParser()
+        configp.readfp(StringIO("""[http://www.example.com/]
+name = Mary
+"""))
+        my_planet = planet.Planet(configp)
+        my_planet.run("Planet Name", "http://example.com", [], True)
+        basedir = 
os.path.join(os.path.dirname(os.path.abspath(sys.modules[__name__].__file__)), 'data') + os.mkdir(self.output_dir) + t_file_names = ['simple', 'simple2'] + self._remove_cached_templates(basedir, t_file_names) + t_files = [os.path.join(basedir, t_file) + '.tmpl' for t_file in t_file_names] + my_planet.generate_all_files(t_files, "Planet Name", + 'http://example.com/', 'http://example.com/feed/', 'Mary', 'mary@example.com') + for file_name in t_file_names: + name = os.path.join(self.output_dir, file_name) + content = file(name).read() + self.assertEqual(content, 'Mary\n') + + def _remove_cached_templates(self, basedir, template_files): + """ + Remove the .tmplc files and force them to be rebuilt. + + This is required mainly so that the tests don't fail in mysterious ways in + directories that have been moved, eg 'branches/my-branch' to + 'branches/mysterious-branch' -- the .tmplc files seem to remember their full + path + """ + for file in template_files: + path = os.path.join(basedir, file + '.tmplc') + try: + os.remove(path) + except OSError, e: + # we don't care about the file not being there, we care about + # everything else + if e.errno != errno.ENOENT: + raise + + def setUp(self): + super(MainTest, self).setUp() + self.output_dir = 'output' + + def tearDown(self): + super(MainTest, self).tearDown() + shutil.rmtree(self.output_dir, ignore_errors = True) + shutil.rmtree('cache', ignore_errors = True) + +if __name__ == '__main__': + unittest.main() diff --git a/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet/tests/test_sanitize.py b/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet/tests/test_sanitize.py new file mode 100755 index 0000000..f0f1d42 --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/gezegen/planet/tests/test_sanitize.py @@ -0,0 +1,125 @@ +# adapted from http://www.iamcal.com/publish/articles/php/processing_html_part_2/ +# and from http://feedparser.org/tests/wellformed/sanitize/ +# by Aaron Swartz, 2006, public domain + +import unittest, new +from planet import sanitize + +class SanitizeTest(unittest.TestCase): pass + +# each call to HTML adds a test case to SanitizeTest +testcases = 0 +def HTML(a, b): + global testcases + testcases += 1 + func = lambda self: self.assertEqual(sanitize.HTML(a), b) + method = new.instancemethod(func, None, SanitizeTest) + setattr(SanitizeTest, "test_%d" % testcases, method) + +## basics +HTML("","") +HTML("hello","hello") + +## balancing tags +HTML("hello","hello") +HTML("hello","hello") +HTML("hello","hello") +HTML("hello","hello") +HTML("hello","hello") +HTML("","") + +## trailing slashes +HTML('','') +HTML('','') +HTML('','') + +## balancing angle brakets +HTML('','b>') +HTML('','>') +HTML('foofoo','b>foo') +HTML('>') +HTML('b><','b>') +HTML('>','>') + +## attributes +HTML('','') +HTML('','') +HTML('','') + +## dangerous tags (a small sample) +sHTML = lambda x: HTML(x, 'safe description') +sHTML('safe description') +sHTML('safe description') +sHTML('safe description') +sHTML('safe description') +sHTML('safe description') +sHTML('safe') +sHTML('safe description') +sHTML('safe description') +sHTML('safe description') +sHTML('safe description') + +for x in ['onabort', 'onblur', 'onchange', 'onclick', 'ondblclick', 'onerror', 'onfocus', 'onkeydown', 'onkeypress', 'onkeyup', 'onload', 'onmousedown', 'onmouseout', 'onmouseover', 'onmouseup', 'onreset', 'resize', 'onsubmit', 'onunload']: + HTML('' % x, + '') + +HTML('never trust your upstream platypus', 'never trust your upstream platypus') + +## ignorables +HTML('foo', 'foo') + 
+## non-allowed tags +HTML('','') +HTML('\r\n\r\n\r\n\r\n\r\nfunction executeMe()\r\n{\r\n\r\n\r\n\r\n\r\n/* + + + + + + {% endblock %} + +
+
+ RSS + Atom +
+
+ +
+ +
+ {% block menu %} + + + {% endblock %} +
+ +

Gezegen her 10 dakikada bir yenilenir. Son güncelleme: {{ run_time.get_run_time }}

+ +
+ + {% block body %} + {% endblock %} + + +
+ + {% block footer%} + + {% endblock %} + + + + + +
+ + diff --git a/DJAGEN/branches/mustafa_branch/djagen/templates/main/feeds.html b/DJAGEN/branches/mustafa_branch/djagen/templates/main/feeds.html new file mode 100755 index 0000000..f2bd421 --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/templates/main/feeds.html @@ -0,0 +1,26 @@ +
+ +
diff --git a/DJAGEN/branches/mustafa_branch/djagen/templates/main/index.html b/DJAGEN/branches/mustafa_branch/djagen/templates/main/index.html new file mode 100755 index 0000000..35a41a3 --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/templates/main/index.html @@ -0,0 +1,915 @@ + + + + Linux Gezegeni + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ + +
+

16 Mart 2010

+ +
+ + +
+
+
+

+ +Yakın Doğu’da Seminer Rüzgarları +

+
+
+
+

Geçen haftadan beri Yakın Doğu Üniversitesi’nde Linux ve Özgür Yazılım seminerleri düzenliyoruz. İlk seminerimiz Linux Nedir? idi. Uzun zamandan beri dinlediğin en eğlenceli Linux Nedir’lerden birisi idi. 3.30 saat sürmesine rağmen konuşmacı Ali Erdinç Köroğlu’nun eğlenceli anlatımı ile zaman su gibi aktı denebilir.

+

Yakın Doğu’ya ilk geldiğim zamanlarda ben de bir Linux Nedir semineri vermiştim. O zamanki katılımcı durumu işte aşağıdaki gibi idi.

+

+

Aslında çoğunluğunu öğretmenlerin oluşturması gereken katılımcı kitlesini, öğrenciler oluşturuyor idi. 1.30 saatlik bir anlatım ile katılımcılara Linux Nedir anlatmıştım. Seminer süresince hiç soru gelmemişti. Seminerden sonra yanıma gelip soru soranlar da olunca, epey mutlu olmuştum.

+

+

Şimdiki durumda katılımcı sayısı azımsanmayacak kadar olması yanında, daha ilgili bir kalabalığın katıldığını düşünüyorum.

+

+

Ali Erdinc’in de epey bir eğlenceli anlatımı olduğunu, dinleyicinin dikkatini çekmek için onları arada dürttüğünü de belirtmek lazım.

+

+

Bu seminerler dizisi Mayıs ayına kadar devam edecek. Meraklısı için üniversite duyuru sayfası, Facebook ve Twitter‘dan takip edebileceklerini söyleyelim. Hatta Kıbrıs’ta olanlar için üniversitede her Cuma akşamları ve Cumartesi günleri sinema gösterimleri yaptığımızı da belirtelim. Biz izlerken eğleniyoruz. Bekleriz.

+

Lefkoşa’ya bahar geldi denebilir. Oğuz Yarımtepe Kıbrıs’tan bildirdi.

+

+
+
+
+ + + + + + + + + +
+
+ +
+
+
+

05 Şubat 2010

+ +
+ + +
+
+
+

+ +100 ml +

+
+
+
+

1 Ocak 2010 tarihinden itibaren uçuşlarda sıvı kısıtlaması uygulanıyor. El bagajlarında 100 mlyi geçen sıvılara, jel, krem vb. el konuyor. Yılbaşında Kıbrıs’a dönerken başıma bir olay gelince ne menem bir şeydir diye araştırdığım bir konu oldu. Sırt çantamda 150 ml Dove krem var idi. Kremi epeydir kullanıyordum. Saat 5.30 gibi uçağa binmek için son kontrol noktasında bekliyorduk. Kontrol için güvenlik gelebilirsiniz dedikten sonra ben de  çantayı cihaza bırakıp geçtim. Cihazın başındaki görevli bu çantada sıvı bir şey var diye seslendi bana. Ön gözde dedi. Evet krem var dedim. Açtı. Baktı 150 ml bir krem. 100 ml üzerini alamıyoruz dedi. Ben de uyku sersemi, o kullanılmış bir kutu, içindeki belki 100 mlnin altındadır dedim. Görevli gülümsedi. O da görevini yapıyordu. Ayakta duran bayan sert bir şekilde kutuyu aldı, baktı, bizim için bunu hacmi önemli dedi. Açıkcası tartışmanın bir anlamı yoktu. Onlar da bir kuralı uyguluyorlardı. Elimle söylendiği gibi para verip aldığım kremi çöpe attım.

+

Şimdi olayın benim açımdan garip noktalarına gelelim

+

* Güvenlik görevlisi kutunun kaç ml olduğuna üzerine bakarak karar verdi. Yani bir dahaki sefere sırf sistemi denemek için, aynı kremden alıp, üzerindeki 150 yi 100 yaparsam geçer muhtemelen.

+

* Görevli içine açıp bakmadı bile. Bana sordu ne var diye. Yani içine şu epeydir ortalarda olmayan domuz gribi koysam dahi bilgisi olamazdı.

+

* Elimle çöpe attım, o çok koydu.

+

Ben de bunun üzerine Ulaştırma Bakanlığı’na bir eposta attım. Epostam Sivil Havacılık Dairesine yönlendirilmiş ve bir yanıt geldi. Kısaca, bu 100 ml uygulaması 10 Ağustos 2006′da İngiltere’de ortaya çıkan terörist plan sonrasında sivil havacılık gündemine girmiş. 6 Kasım 2006′da çıkarılan 1546/2006 tüzüğü ile tüm AB üyesi ülkelerde ve İsviçre, İzlanda ve Norveç’te, ABD ve Kanada’da uygulanmaya başlamış. Türkiye de ECAC üyesi bir devlet olduğundan tavsiye kararına uyarak bu uygulamayı diğer devletlerdeki gibi aynı kurallar dahilinde uygulamaya başlamış. Neden 100 ml peki? Birleşmiş Milletler bünyesindeki Patlayıcılar Çalışma Grubunda yapılan araştırma, testler ve risk değerlendirmesi neticesinde sıvıların 100 ml lik kaplarda 1 litreklik poşette taşınması halinde (1 lt’lik poşete yaklaşık 6 adet kap sığmaktaymış) uçuşun emniyetini tehlikeye düşürmeyeceği sonucuna varılmış. Bilim adamları araştırmış bulmuş, bir şey diyemeyecem bu konuda. Peki bizde 100 ml olduğu nasıl anlaşılıyor? Baya, ya size soruyorlar ya da üzerindeki yazıları okuyorlar. Yani ben teroristlik yapmak istesem, alırım 200 ml sıvı patlayıcı, koyarım Dove krem kutusuna, yazıları bir güzel 100 diye düzenlerim, sorarlarsa da 100 der geçerim. Peki biz neden illa 100 diye uyguluyoruz? E çünkü diğer ülkelerde de öyle de ondan. Epostadaki şu satırlara bakalım:

+

“Ülkemiz yukarıda adı geçen uluslarası kuruluşların aldığı kararları  ve berlilediği standartları uygulamakla yükümlüdür.”

+

Bu konudaki uygulama diğer ülkelerde hangi standartlarda uygulanıyor bilmiyorum. Belki de sadece Kıbrıs uçuşlarında bir acayiplik vardır. Standart denilen kavram sadece 100 sayısına bağlı bir şeydir diye de anlaşılıyor olabilir.

+

Siz siz olun, uçağa binerken el bagajınızda üzerinde 100 ml üzeri bir şey yazan herhangi bir kap bulundurmayın. İçi boş dolu farketmez.

+
+
+
+ + + + + + + + + +
+
+ +
+
+
+

29 Ocak 2010

+ +
+ + +
+
+
+

+ +Artık Sun yok! +

+
+
+
+

iPsunoraclead haberleri arasında kaybolup gidiyor ama Oracle uzun süren Sun’ı satın alma işlemini bitirdi. Artık www.sun.com adresine girdiğinizde sizi doğrudan Oracle sitesine yönlendiriyor.

+

Beni en çok ilgilendiren konular ise Sun’ın özgür yazılım projelerine devam edilip edilmeyeceği konularında ise şimdilik olumlu haberler geliyor. Bütün bu projeler içerisinde devam edilmeyeceği açıklanan tek proje şimdilik Kenai.

+

Umarım hepimiz için mutlu son olur…

+

Ek: Kültür Mantarı‘nın yönlendirmesi ile James Gosling’in bu konu ile ilgili blogunu gördüm ve ordaki görselin de burada saklanmasının iyi olacağını düşünüp buraya kopyaladım…

+

sunrip


+
+
+ + + + + + +
+
+ +
+
+
+

24 Aralık 2009

+ +
+ + +
+
+
+

+ +EMO 13. Ulusal Kongresi +

+
+
+
+

EMO’nun 23-26 Aralıkta ODTÜ de gerçekleşecek olan 13. Ulusal Kongresi kapsamında 25 Aralık Cuma günü 9:30-11:15 arasında Özgür Yazılım başlılklı özel oturumda “Özgür Yazılımların Uygulama Geliştirme Modeline Etkisi; Tekir’den Öğrendiklerimiz” ve 11.30-12.30 arasında da “Özgür Yazılımın Ekonomik ve Sosyal Yönleri” sunumlarını yapıyorum.

+

Genel olarak yüklü bir programı olan bu etkinlikte çeşitli LKD seminerleri de olacak. Buyrunuz geliniz!


+
+
+ + + + + + +
+
+ +
+
+
+

24 Eylül 2009

+ +
+ + +
+
+
+

+ +Intel, Atom, Moblin +

+
+
+
+

Intel Atom işlemcileri ile hayatın her yerinde yer alamak istiyor. x86 tabanlı Atom işlemciler programcılara normal bilgisayarlar için yazılmış uygulamalarını çok fazla değişikliğe uğratmaya gerek kalmadan mobil cihazlarda çalıştırabilmesine olanak sağlıyor. Bu da Intel’e önemli avantajlar sağlıyor. Bu avantajını daha da arttırmak için cihazar üzerinde performansı arttıracak işletim sistemi için de kolları sıvayıp Moblin’i geliştirmeye başlamışlardı. Dün bu konular üzerine Intel’den üç önemli açıklama oldu…

+

Atom işlemcili cihazlarda uygulama performansını arttırmak için yeni bir geliştirici programı başlattılar. Atom Developer Program‘ı teşvik etmek içinde bir yarışma başlattılar. Bence bir göz atmakta fayda var… ( Ben kayıt olacağım :) )

+

İkinci ve üçüncü açıklamalar ise bir arada geldi, Moblin’in yeni sürümü 2.1 yayınlandı ve Atom işlemcili bir akıllı telefon üzerinde sunuldu. Intel bir çırpıda bir dolu firmaya rakip oldu :) Geçenlerde de yazmıştım,  önümüzdeki yıl içerisinde mobil dünyada bir dolu ilginç gelişmeler bekliyorum. Umarım bu rekabetten özgür yazılım ve biz kullanıcılar kazançlı çıkarız…


+
+
+ + + + + + +
+
+ +
+
+
+

25 Ağustos 2009

+ +
+ + +
+
+
+

+ +Teknik Destek Kopya Kağıtı +

+
+
+
+

xkcd’de geçen gün yayınlanan bu teknik destek kopya kağıdını pek beğendim ve Türkçe bir sürümünü yapayım dedim.

+

teknikdestek
+İsteyenler için ODF hali de burada


+
+
+ + + + + + +
+
+ +
+
+
+

18 Ağustos 2009

+ +
+ + +
+
+
+

+ +Korsan Değil “Fikir Hırsızı” +

+
+
+
+

Kültür ve Turizm Bakanlığı, Fikir ve Sanat Eserleri Kanunu’nda değişiklik yapılarak İnternet üzerinden müzik, film, kitap ve benzeri şeyleri indirenlerinde ceza almasını sağlamak için çalışma başlatmış. Bu denetimi yapabilmek için de internet servis sağlayıcıları ile birlikte çalışacaklarmış.

+

Her köşe başında kurulan tezgahlarda kitap, cd, dvd satan arkadaşlar hiç bir sorun ile karşılaşmadan bu işi yaparken, bunun için bildiğim kadarıyla yasal düzenlemeler zaten var, onlarla mücadele etmek yerine, internetten akan tarfiği denetleyecekler. Bu denetim sırasında da müzik mi yoksa sevilinizden gelen e-postanızı mı indirdiğiniz fark etmeyecek, dinleyecekler. Ayrıca indirdiğiniz müziğin yasal mı yoksa yasa dışımı olduğunu bir çırpıda anlayacaklar. Bu arada, haberden eğer yanlış okumadıysam,  yapılan operasyonda makinenizde çıkan parça sayısı kadar da görevlilere prim verilecek :) Yani büyük birader izlemeye ve istediğinde yasaklamaya olanak sağlayacak yeni yollar arıyor…

+

Bütün bunlardan fikir haklarına saygı duymadığım anlaşılmasın tam tersine korsana, fikir hırsızlığına kesinlikle karşıyım. Fakat bütün bunların bahane edilerek kişisel iletişimin ihlal edilmesine daha çok karşıyım.

+

Son olarak bir haber daha verelim Pirate Bay’in 23 GB’lik arşivi de paylaşıma açılmış. Bu arşiv içerisinde yasal olmayan şeyler olabilir ama yasal olarak paylaşılan da bir çok eser var. Sizler yasal olanlarını indirin :) Korsan değil özgür yazılım kullanın!


+
+
+ + + + + + +
+
+ +
+
+
+

07 Temmuz 2009

+ +
+ + +
+
+
+

+ +Mobil Cihazlar Dünyasında Neler Oluyor? +

+
+
+
+

moblinBir süredir mobil cihazlar dünyası hareketlenmiş durumda. Apple iPhone ile birlikte mobil telefon dünyasında ciddi bir hareketlenme olmuştu. Palm, Nokia, Sony-Ericson, BlackBerry gibi sektörün önde gelen firmalarına Apple ciddi bir rakip olarak ortaya çıkmış ardından da Google Android ile bir platform olarak henüz yeterince destekleyen donanım olmasa bile vaadettikleri ile dikkatleri üzerine çekmişti. Android, WebOS ve iPhone OS‘a  karşı Symbian‘ı savunmaya devam eden Nokia, elinde olmayan hisselerini de alarak,  bir vakıf kurup Symbiyan’ı açık kaynak kodlu olarak  bu vakfa devretmişti.

+

Tam da bu esnada Intel Atom işlemcisi ile düşük kaynak kullanan PC’lerin geliştirilmesine olanak sağladı ve NetBook’lar geçtiğimiz yıl içinde popüler cihazlar arasına girdiler.

+

Bu yıl ise Intel, Mobile Internet Device ( MID - Mobil Internet Aracı ) üzerine ciddi yatırımlar yapmaya başladı. Hatta bu cihazların cazibesini arttırmak için özel bir linux dağıtımına bile başladı : Moblin.

+

Moblin’e destek konusunda Intel önce Canonical ile anlaşmıştı. Daha sonra Canonical NetBook dağıtımı olarak Nokia’nın kendi tabletlerinde kullanmak amacıyla ürettiği Maemo‘yu desteklemeye karar verdiğini açıkladı. Intel’de Moblin’i Linux Vakfı’na devrettiğini ve destek konusunda da Novell’le anlaştığını ilan etti. İki hafta önce detayları belirtilmeyen bir Nokia - Intel anlaşması ilan edildi. Genel olarak yorumlanan ise  Nokia’nın daha becerikli telefonlar üretmek için Intel teknolojilerini kullanacağı bu arada da Moblin ile Maemo arasında bir seçim yapıp güçlerini birleştirecekleri yönündeydi. Bugün Nokia, Android temelli telefonlar üretmeyeceğini ve GTK+ temelli olan Maemo’yu Qt’ye taşıyacağını ilan etti.

+

İşte benim sorularımın temelini de burası oluşturuyor. Qt temelli bir Maemo’yu Canonical desteklemeye devam edecek mi? Nokia Intel işlemcili MID’ler üretip bunlarda Mameo’mu koşturacak yoksa Intel işlemcili telefonlar üretip Symbian’ı rakipleri kadar becerikli yepyeni bir hale mi dönüştürecek? Intel MID’ler konusunda neler planlıyor? Bu planları içerisinde Moblin’i desteklemeye devam etmek var mı yoksa Nokia ile birlikte Maemo’ya yatırım mı yapacaklar? NetBook’larda da kullanılmaya başlayan Android de bu üretilecek donanımlar için bir alternatif olacaklar mı?

+

Hepsinden önemlisi bütün bunların sonucunda ortaya çıkacak olan, biz tüketiciler için ucuz ve becerikli donanımlar mı yoksa; bir biri ile uyumsuz bir dolu daha oyuncak mı?


+
+
+ + + + + + +
+
+ +
+
+
+

17 Haziran 2009

+ +
+ + +
+
+
+

+ +LKD Genel Kurulu için Ankara’ya +

+
+
+
+

Bu hafta sonu gerçekleştirilecek LKD Genel Kurulu için Ankara’ya gidiyoruz. Aşağıdaki yazı bu yolculuk için organizasyon yapmaya çalışan  Volkan’dan…

+

***

+

Ankara yerine Bağdata gitsem bu kadar koştururdum herhalde,

+

TCDD : en teknolojik YHT çalıştıran, 5 saaat 28 dk Ankaraya ulaştıran koskoca
+kurum.
+Evet bu kurum malesef bilet satmak istemiyor.

+

1- web sitesi windows ve Internet explorer bağımlısı. Öncelikle böyle bir
+sisteme sahip olmanız gerekli. (MAC ve Linux kullanıcıları tren yolcuları
+portföyünde yer almıyor. Onlar uçak veya otobüs severler.!)

+

2- web sitesindeki bilet satış uygulamasında banka sıra makinelerinin bir
+türevi sadece. Sıradan boş koltuk veriyor. Pulman vagonlarında ilk 6 koltuk
+karşılıklı bakar durumda, son 3 koltukda geriye yatamaz durumda. Bilin
+bakalım verdiği ilk koltuklar nereleri ? Evet bildiniz bunlar. Farklı bir
+koltuk veya vagon seçemiyorsunuz. Seçilebilecek şeyler bayan yanı ve
+internet. Onlarında ne kadar gerçek seçimlere izin verildiği şüpheli.
+(İnternet olsun dedim, sonuç yok dedi.)

+

3- PTT şubeleri tren bilet satacak acenteler olarak duyuruluyor. Gidiyorsunuz,
+veee… Evet, biz satıyoruz, ama siteye girebilirsek diyorlar. Ne oldu, tabii
+ki giremediler. 10dk sıra beklediniğiniz için teşekkür ederiz.

+

4- Acente, komisyon karşılığı TCDD bileti satanlar. Gidiyoruz birine, bize
+bilet lazım satabiliyor musunuz? diye soruyorum. Tabii buyrun diyorlar. Gidiş
+dönüş 1 tam 1 öğrenci istiyorum. Satıcı önce
+- G/D kesmiyorum diyor buradan.!
+- Nasıl yani?
+- Fark yok zaten,ayrı ayrı keseyim. Fiyatı farklı mı ki?
+Başka bir arkadaşı düzeltiyor, aynı express olursa kesebilirsin.
+- Elbette G/D niye alayım indirim var diyorum.
+Neyse girip deniyor, gelen koltuk numaralarını soruyorum.
+- 4,5 diyor. (İlk altı koltuk içinden boş olanlar)
+- Değiştiremiyor musunuz?
+- Malesef.
+- Internet sürümüne mi giriyorsunuz diyorum ister istemez.
+- Hayır biz acente olarak giriyoruz ama fark yok. cevabı geliyor. (Tahmininen
+üzerine ek komisyon ekleniyor sadece.)
+- Kim koltuk seçtiriyor bana ?
+- Gardan alabilirsiniz, Haydarpaşa veya Sirkeci.

+

5- Rotamız Sirkeci garı. Bir otobüs ve tramvay ile ulaşıyorum.
+Bende dil yandı ya, ilk soru Fatih expresine bilet istiyorum, ama koltuk
+seçebiliyor musunuz?
+- Bakalım yer boş olursa seçebiliriz diyor satıcı bu kez.
+- Ohh nihayet.
+- 1 tam 1 öğrenci G/D istiyorum, artı 1 öğrenci sadece gidiş.
+- Öğrencide G/D farkı yok cevabı geliyor.
+- Biliyorum, tam da var onun için söylüyorum.(Bilgi: Tam bileti G/D alırsanız
+öğrenci bileti ile aynı fiyat, garip.G/D alacaksanız öğrenciliğiniz işe
+yaramıyor. Yani pasoya gerek yok. Tespit: Öğrenciler hep tek yö seyahat
+eder.)
+- Kredi kartımı, peşin mi?
+- DIINN ! kredi kartı.. var dimi?
+- Evet, 112 TL
+- Buyrun, zııttt pıırtt iki tak tak bi laklak biletler ve pos slipi elimde.

+

Gişenin önünden ayrılmadan biletleri tren, tarih, yer vs. doğru mu diye
+kontrol ediyorum. Elimde biletler teşekkür edip ayrılırken, 1,5 saatte ancak
+bir alış veriş yapmış oluyorum.  Daha bir de geri dönüş yolu var.

+

Velhasıl,
+Gidiş : 18/06/2009 Perşembe 23:30 Haydarpaşa Vagon:X Koltuk: XX-XX-XX
+Gidiş : 20/06/2009 Cumartesi 23:30 Ankara Vagon:X Koltuk: XX-XX

+

Hayırlı yolculuklar.

+

=====================
+Dipnot-1: Bu yerleri aldığım 1. vagon haricinde 2 vagon tamamen boş görünüyor
+daha. 2-3 nolarda satılan yerler var.

+

Dipnot-2: Ben telefonla iş yapmaya alışamamış biri olarak, rezervasyon veya
+satış işlemi var mı diye hiç peşine düşmedim. Orada da farklı bir macera sizi
+bekliyor olabilir, kimbilir?

+

Dipnot-3: Yataklı vagonlarda alt-üst yatak seçme şansınız olabilir mi sizce?


+
+
+ + + + + + +
+
+ +
+
+
+

16 Haziran 2009

+ +
+ + +
+
+
+

+ +IE, WTW ve Gıda Yardımı +

+
+
+
+

wfp-wtwBugünlerde dünya üzerindeki açlık, gıda yardımları ve bunlara ait haberler her zamankinden daha fazla ilgimi çekiyor. Dolayısıyla Microsoft’un yeni kampanyası ilgimi çekti. Microsoft İnternet Tarayıcısının yeni sürümünü daha iyi duyurabilmek için gıda yardımı üzerine kurulu bir kampanya başlatmış. IE8′in her tam indirilmesine karşılık 8 öğün yemek bağışında bulunacakmış. Detaylara buradan ulaşabilirsiniz…

+

Bu konu ile ilgili de bir dolu tartışma gündeme geldi tabii ki, örneğin TechCrunch‘da kampanyaya dair bir dolu yazı ve tartışma var. Ben kendi adıma Linux üzerinde zaten çalışmayan bu tarayıcıyı indirip biraz ağ zamanı harcayıp bağışta bulunsam mı, zaten IE kullananların hatalı çalışan eski sürümler yerine CSS ve JS ile ilgili bir dolu hatanın düzeltildiği bu yeni sürüme geçmelerini teşvik etsem mi, yoksa hiç sesimi çıkarmasam mı bilemedim. Ardından da bu haberi bahane edip daha fazlası ile yazayım dedim.

+

İster IE8 indirin isterseniz aşağıdaki organizasyonların sitelerini ziyaret edip dünya üzerindeki açlık ve fakirlikle mücadeleye katkıda bulunmak için yapabileceklerinizi öğrenin… Bunların içerisinde özellikle Birleşmiş Milletler Dünya Gıda Programı’nın Walk The Web kampanyasına bir göz atmanızı öneririm…

+ +

Son olarak da bugünlerde herkese önerdiğim gibi Yuva ( Home ) belgeselini izlemenizi şiddetle tavsiye ederim.


+
+
+ + + + + + +
+
+ +
+
+
+

28 Mayıs 2009

+ +
+ + +
+
+
+

+ +TBD Bilişim Kongresi’nde Özgür Yazılım Paneli +

+
+
+
+

TBD’nin bu yıl 3.sünü düzenlediği İstanbul Bilişim Kongresi‘nde Pazar günü saat 14:00′de Özgür Yazılım Paneli olacaktır. Panel’de özgür yazılım ve iş modelleri üzerinde durulacaktır. İlgilenenlere duyurulur…

+

Yer: Marmara Üniversitesi Nişantaşı Kampüsü
+Erdal İnönü Bilim ve Kültür Merkezi
+Tarih: 31 Mayıs Pazar, 14:00 - 15:20
+Oturum başkanı: Görkem Çetin
+Konuşmacılar: Enver Altın, Hakan Uygun, Cahit Cengizhan


+
+
+ + + + + + +
+
+ +
+
+
+

13 Nisan 2009

+ +
+ + +
+
+
+

+ +Sıralama Algoritmaları +

+
+
+
+

Sıralama algoritmaları, programcılığa girişi oluşturan en temel şeylerdendir. Özellikle aynı problemin çözümü için farklı yöntemlerin nasıl farklı sonuçlar verdiğini görmek için şahane örneklerdir. Daha da iyisi bu farklı algoritmaların görsel olarak karşılaştırmasıdır. İşte tam da bu işi başarıyla yapan bu siteye bakmanızı şiddetle tavsiye ederim. Sadece farklı algoritmaları görsel karşılaştırmasını değil, her algoritmanın farklı veri kümelerinde davranış biçimini ve detaylı karşılaştırmalarını da bulabilirsiniz…


+
+
+ + + + + + +
+
+ +
+
+
+
+ + + + + + + + + + + + + + diff --git a/DJAGEN/branches/mustafa_branch/djagen/templates/main/main.html b/DJAGEN/branches/mustafa_branch/djagen/templates/main/main.html new file mode 100755 index 0000000..11093db --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/templates/main/main.html @@ -0,0 +1,36 @@ +{% extends "main/base.html" %} + + + {% block body %} + + {% for entry in entries_list|slice:items_per_page %} + {% autoescape off %} + + {% ifequal entry.entry_id.is_approved 1 %} + + {% ifchanged entry.date.day entry.date.month entry.date.year %}
{% endifchanged %} + + {% ifchanged %}

{{ entry.date|date:"d F Y" }}

{% endifchanged %} + +
+ + +

{{ entry.title }}

+

+ Yazar: {{ entry.entry_id.author_name }} + Tarih: {{ entry.date|date:"d F Y H:i" }} +

+
+ {{ entry.content_html|truncatewords_html:truncate_words }} +
+ {% endifequal %} + {% endautoescape %} + +
+ + + {% endfor %} + + + {% endblock %} + diff --git a/DJAGEN/branches/mustafa_branch/djagen/templates/main/members.html b/DJAGEN/branches/mustafa_branch/djagen/templates/main/members.html new file mode 100755 index 0000000..93eb28a --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/templates/main/members.html @@ -0,0 +1,16 @@ +{% extends "main/base.html" %} + + {% block body %} + + + + + {% endblock %} diff --git a/DJAGEN/branches/mustafa_branch/djagen/templates/main/query.html b/DJAGEN/branches/mustafa_branch/djagen/templates/main/query.html new file mode 100755 index 0000000..c3a3f25 --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/templates/main/query.html @@ -0,0 +1,11 @@ +{% extends "main/base.html" %} +{% block body %} +
+ Author
First name:
+ and/or + Last name:
+ or
+ Search text: + +
+{% endblock %} diff --git a/DJAGEN/branches/mustafa_branch/djagen/templates/main/subscribe.html b/DJAGEN/branches/mustafa_branch/djagen/templates/main/subscribe.html new file mode 100755 index 0000000..2e7722a --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/templates/main/subscribe.html @@ -0,0 +1,42 @@ +{% extends "main/base.html" %} + + {% block body %} +

+ Linux Gezegeni is maintained by the Gezegen Ekibi (Planet Team); questions about the Planet and applications to land on the Planet can be sent by e-mail. +

+ +
+ +

+ When applying to land on the Planet, we ask that you use the form below to send an RSS/Atom feed that complies with the Gezegen Kuralları (Planet Rules), the photo you want to use on the Planet (at most 80x80 pixels, in other words a hackergotchi), and your Jabber address if you have one. +
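
For context, approved feeds ultimately become entries in the planet's gezegen/config.ini, whose format can be seen later in this diff. A hypothetical entry produced from such an application might look like the following; the URL, name, face, nick, and id values here are invented for illustration.

[http://blog.example.org/category/gezegen/feed/]
name = Example Author
face = exampleauthor.png
nick = eauthor
label = Personal
id = 99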

+ +
+ + {% ifnotequal submit 'done' %} + +

Membership Application Form

+
+ {% for field in form %} +
+ {% if field.errors %} + {{ field.errors }} + {% endif %} + {{ field.label_tag }} + {% if field.help_text %} + {{ field.help_text }} + {% endif %} + {{ field }} +
+ {% endfor %} +
+ +
+ {% else %} +

+ Your application has been received. +

+ {% endifnotequal %} + + {% endblock %} + diff --git a/DJAGEN/branches/mustafa_branch/djagen/testdir/__init__.py b/DJAGEN/branches/mustafa_branch/djagen/testdir/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/DJAGEN/branches/mustafa_branch/djagen/testdir/deneme.py b/DJAGEN/branches/mustafa_branch/djagen/testdir/deneme.py new file mode 100755 index 0000000..f0e5a5e --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/testdir/deneme.py @@ -0,0 +1,7 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +class Deneme: + + def test(self): + print "ok" diff --git a/DJAGEN/branches/mustafa_branch/djagen/urls.py b/DJAGEN/branches/mustafa_branch/djagen/urls.py new file mode 100755 index 0000000..4fa21cc --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/urls.py @@ -0,0 +1,36 @@ +from django.conf.urls.defaults import * +from djagen.collector.views import * +from djagen import settings + + +# Uncomment the next two lines to enable the admin: +from django.contrib import admin +admin.autodiscover() + +urlpatterns = patterns('', + + # Uncomment the admin/doc line below and add 'django.contrib.admindocs' + # to INSTALLED_APPS to enable admin documentation: + # (r'^admin/doc/', include('django.contrib.admindocs.urls')), + + # Uncomment the next line to enable the admin: + (r'^admin/', include(admin.site.urls)), + #(r'^archive/$',archive), + (r'^main/', 'djagen.collector.views.main'), + (r'^subscribe/', 'djagen.collector.views.member_subscribe'), + (r'^members/', 'djagen.collector.views.list_members'), + (r'^archive/$','djagen.collector.views.archive'), + (r'^archive/(?P\d{4})/$', archive), + (r'^archive/(?P\d{4})/(?P\d{1,2})/$', archive), + (r'^archive/(?P\d{4})/(?P\d{1,2})/(?P\d{1,2})$', archive), + (r'^djagen/$',main), + (r'^query/$',query), + ) +urlpatterns += patterns('', + url(r'^captcha/', include('captcha.urls')), +) + + # For development server. + #(r'^(?P.*)$', 'django.views.static.serve', + # {'document_root': settings.BASEPATH + 'gezegen/www/'}), + diff --git a/DJAGEN/branches/mustafa_branch/djagen/wsgi_handler.py b/DJAGEN/branches/mustafa_branch/djagen/wsgi_handler.py new file mode 100755 index 0000000..419437f --- /dev/null +++ b/DJAGEN/branches/mustafa_branch/djagen/wsgi_handler.py @@ -0,0 +1,11 @@ +import sys +import os + +# WSGI handler module. + +sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/..') +os.environ['DJANGO_SETTINGS_MODULE'] = 'djagen.settings' + +import django.core.handlers.wsgi + +application = django.core.handlers.wsgi.WSGIHandler() \ No newline at end of file diff --git a/DJAGEN/branches/oguz/00_default_vhost.conf b/DJAGEN/branches/oguz/00_default_vhost.conf new file mode 100755 index 0000000..d42c32f --- /dev/null +++ b/DJAGEN/branches/oguz/00_default_vhost.conf @@ -0,0 +1,218 @@ +### Section 3: Virtual Hosts +# +# VirtualHost: If you want to maintain multiple domains/hostnames on your +# machine you can setup VirtualHost containers for them. Most configurations +# use only name-based virtual hosts so the server doesn't need to worry about +# IP addresses. This is indicated by the asterisks in the directives below. +# +# Please see the documentation at +# +# for further details before you try to setup virtual hosts. +# +# You may use the command line option '-S' to verify your virtual host +# configuration. + +# +# Use name-based virtual hosting. +# +NameVirtualHost *:80 + +# +# VirtualHost example: +# Almost any Apache directive may go into a VirtualHost container. 
+# The first VirtualHost section is used for requests without a known +# server name. +# +# +# ServerAdmin webmaster@dummy-host.example.com +# DocumentRoot /www/docs/dummy-host.example.com +# ServerName dummy-host.example.com +# ErrorLog @rel_logfiledir@/dummy-host.example.com-error_log +# CustomLog @rel_logfiledir@/dummy-host.example.com-access_log common +# + + +# +# The First Virtual Host is also your DEFAULT Virtual Host. +# This means any requests that do not match any other vhosts will +# goto this virtual host. +# + + + + # + # DocumentRoot: The directory out of which you will serve your + # documents. By default, all requests are taken from this directory, but + # symbolic links and aliases may be used to point to other locations. + # + DocumentRoot "/var/www/localhost/htdocs" + + # + # This should be changed to whatever you set DocumentRoot to. + # + + + # + # Possible values for the Options directive are "None", "All", + # or any combination of: + # Indexes Includes FollowSymLinks SymLinksifOwnerMatch ExecCGI MultiViews + # + # Note that "MultiViews" must be named *explicitly* --- "Options All" + # doesn't give it to you. + # + # The Options directive is both complicated and important. Please see + # http://httpd.apache.org/docs-2.0/mod/core.html#options + # for more information. + # + Options Indexes FollowSymLinks + + # + # AllowOverride controls what directives may be placed in .htaccess files. + # It can be "All", "None", or any combination of the keywords: + # Options FileInfo AuthConfig Limit + # + AllowOverride None + + # + # Controls who can get stuff from this server. + # + Order allow,deny + Allow from all + + + + # this must match a Processor + ServerEnvironment apache apache + + # these are optional - defaults to the values specified in httpd.conf + MinSpareProcessors 4 + MaxProcessors 20 + + + + + + ServerName django.localhost.in + + + SetHandler python-program + PythonHandler django.core.handlers.modpython + SetEnv DJANGO_SETTINGS_MODULE pijama.settings + PythonOption django.root /pijama + PythonDebug On + PythonPath "['/home/oguz/neu/innova/people/oguz/', '/home/oguz/neu/innova/people/oguz/pijama/'] + sys.path" + + + +Alias /media/ /var/www/localhost/htdocs/pijama/ + + SetHandler None + + +# settings for innovation helpdesk + + SetHandler python-program + PythonHandler django.core.handlers.modpython + SetEnv DJANGO_SETTINGS_MODULE jutda.settings + PythonOption django.root /jutda + PythonDebug On + PythonPath "['/home/oguz/django-projects/', '/home/oguz/django-projects/jutda/'] + sys.path" + + +Alias /helpdesk/ /var/www/localhost/htdocs/helpdesk/htdocs/ + + SetHandler None + + +Alias /media/ /var/www/localhost/htdocs/helpdesk/htdocs/media/ + + SetHandler None + + +# +# SetHandler None +# + +# setting for ssk helpdesk + + SetHandler python-program + PythonHandler django.core.handlers.modpython + SetEnv DJANGO_SETTINGS_MODULE jutda_dc.settings + PythonOption django.root /jutdadc + PythonDebug On + PythonPath "['/home/oguz/django-projects/', '/home/oguz/django-projects/jutda_dc/'] + sys.path" + + +Alias /helpdesk/ /var/www/localhost/htdocs/helpdesk_dc/htdocs/ + + SetHandler None + + +Alias /media/ /var/www/localhost/htdocs/helpdesk_dc/htdocs/media/ + + SetHandler None + + + + SetHandler None + + +# setting for ssk helpdesk + + SetHandler python-program + PythonHandler django.core.handlers.modpython + SetEnv DJANGO_SETTINGS_MODULE mtest.settings + PythonOption django.root /mtest + PythonDebug On + PythonPath "['/home/oguz/django-projects/', 
'/home/oguz/django-projects/mtest/'] + sys.path" + + + + SetHandler None + + +# settings for djagen + + SetHandler python-program + PythonHandler django.core.handlers.modpython + SetEnv DJANGO_SETTINGS_MODULE djagen.settings + PythonOption django.root /djagen + PythonDebug On + PythonPath "['/home/oguz/django-projects/', '/home/oguz/django-projects/djagen/'] + sys.path" + + +Alias /djagenmedia/ /var/www/localhost/htdocs/djagen/ + + SetHandler None + + + + SetHandler None + + +#settings for noan + + SetHandler python-program + PythonHandler django.core.handlers.modpython + SetEnv DJANGO_SETTINGS_MODULE noan.settings + PythonOption django.root /noan + PythonDebug On + PythonPath "['/home/oguz/django-projects/', '/home/oguz/django-projects/noan/'] + sys.path" + + +Alias /noanmedia/ /var/www/localhost/htdocs/noan/media/ + + SetHandler None + + +Alias /admin_media/ /usr/lib/python2.5/site-packages/django/contrib/admin/media + + SetHandler None + + + + SetHandler None + + + + diff --git a/DJAGEN/branches/oguz/README b/DJAGEN/branches/oguz/README new file mode 100755 index 0000000..0f29879 --- /dev/null +++ b/DJAGEN/branches/oguz/README @@ -0,0 +1,2 @@ + -l /var/www/localhost/htdocs/ + lrwxrwxrwx 1 root root 46 2010-05-09 22:48 djagen -> /home/oguz/django-projects/djagen/gezegen/www/ diff --git a/DJAGEN/branches/oguz/djagen/__init__.py b/DJAGEN/branches/oguz/djagen/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/DJAGEN/branches/oguz/djagen/admin_media b/DJAGEN/branches/oguz/djagen/admin_media new file mode 120000 index 0000000..454763c --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/admin_media @@ -0,0 +1 @@ +/usr/lib/python2.5/site-packages/django/contrib/admin \ No newline at end of file diff --git a/DJAGEN/branches/oguz/djagen/captcha/__init__.py b/DJAGEN/branches/oguz/djagen/captcha/__init__.py new file mode 100755 index 0000000..ac47d9a --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/captcha/__init__.py @@ -0,0 +1,12 @@ +VERSION = (0, 1, 7) + +def get_version(svn=False): + "Returns the version as a human-format string." 
+ v = '.'.join([str(i) for i in VERSION]) + if svn: + from django.utils.version import get_svn_revision + import os + svn_rev = get_svn_revision(os.path.dirname(__file__)) + if svn_rev: + v = '%s-%s' % (v, svn_rev) + return v diff --git a/DJAGEN/branches/oguz/djagen/captcha/conf/__init__.py b/DJAGEN/branches/oguz/djagen/captcha/conf/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/DJAGEN/branches/oguz/djagen/captcha/conf/settings.py b/DJAGEN/branches/oguz/djagen/captcha/conf/settings.py new file mode 100755 index 0000000..ddfe82f --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/captcha/conf/settings.py @@ -0,0 +1,49 @@ +import os +from django.conf import settings + +CAPTCHA_FONT_PATH = getattr(settings,'CAPTCHA_FONT_PATH', os.path.normpath(os.path.join(os.path.dirname(__file__), '..', 'fonts/Vera.ttf'))) +CAPTCHA_FONT_SIZE = getattr(settings,'CAPTCHA_FONT_SIZE', 22) +CAPTCHA_LETTER_ROTATION = getattr(settings, 'CAPTCHA_LETTER_ROTATION', (-35,35)) +CAPTCHA_BACKGROUND_COLOR = getattr(settings,'CAPTCHA_BACKGROUND_COLOR', '#ffffff') +CAPTCHA_FOREGROUND_COLOR= getattr(settings,'CAPTCHA_FOREGROUND_COLOR', '#001100') +CAPTCHA_CHALLENGE_FUNCT = getattr(settings,'CAPTCHA_CHALLENGE_FUNCT','captcha.helpers.random_char_challenge') +CAPTCHA_NOISE_FUNCTIONS = getattr(settings,'CAPTCHA_NOISE_FUNCTIONS', ('captcha.helpers.noise_arcs','captcha.helpers.noise_dots',)) +CAPTCHA_FILTER_FUNCTIONS = getattr(settings,'CAPTCHA_FILTER_FUNCTIONS',('captcha.helpers.post_smooth',)) +CAPTCHA_WORDS_DICTIONARY = getattr(settings,'CAPTCHA_WORDS_DICTIONARY', '/usr/share/dict/words') +CAPTCHA_FLITE_PATH = getattr(settings,'CAPTCHA_FLITE_PATH',None) +CAPTCHA_TIMEOUT = getattr(settings, 'CAPTCHA_TIMEOUT', 5) # Minutes +CAPTCHA_LENGTH = int(getattr(settings, 'CAPTCHA_LENGTH', 4)) # Chars +CAPTCHA_IMAGE_BEFORE_FIELD = getattr(settings,'CAPTCHA_IMAGE_BEFORE_FIELD', True) +CAPTCHA_DICTIONARY_MIN_LENGTH = getattr(settings,'CAPTCHA_DICTIONARY_MIN_LENGTH', 0) +CAPTCHA_DICTIONARY_MAX_LENGTH = getattr(settings,'CAPTCHA_DICTIONARY_MAX_LENGTH', 99) +if CAPTCHA_IMAGE_BEFORE_FIELD: + CAPTCHA_OUTPUT_FORMAT = getattr(settings,'CAPTCHA_OUTPUT_FORMAT', u'%(image)s %(hidden_field)s %(text_field)s') +else: + CAPTCHA_OUTPUT_FORMAT = getattr(settings,'CAPTCHA_OUTPUT_FORMAT', u'%(hidden_field)s %(text_field)s %(image)s') + + +# Failsafe +if CAPTCHA_DICTIONARY_MIN_LENGTH > CAPTCHA_DICTIONARY_MAX_LENGTH: + CAPTCHA_DICTIONARY_MIN_LENGTH, CAPTCHA_DICTIONARY_MAX_LENGTH = CAPTCHA_DICTIONARY_MAX_LENGTH, CAPTCHA_DICTIONARY_MIN_LENGTH + + +def _callable_from_string(string_or_callable): + if callable(string_or_callable): + return string_or_callable + else: + return getattr(__import__( '.'.join(string_or_callable.split('.')[:-1]), {}, {}, ['']), string_or_callable.split('.')[-1]) + +def get_challenge(): + return _callable_from_string(CAPTCHA_CHALLENGE_FUNCT) + + +def noise_functions(): + if CAPTCHA_NOISE_FUNCTIONS: + return map(_callable_from_string, CAPTCHA_NOISE_FUNCTIONS) + return list() + +def filter_functions(): + if CAPTCHA_FILTER_FUNCTIONS: + return map(_callable_from_string, CAPTCHA_FILTER_FUNCTIONS) + return list() + diff --git a/DJAGEN/branches/oguz/djagen/captcha/fields.py b/DJAGEN/branches/oguz/djagen/captcha/fields.py new file mode 100755 index 0000000..7df0f03 --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/captcha/fields.py @@ -0,0 +1,81 @@ +from django.forms.fields import CharField, MultiValueField +from django.forms import ValidationError +from django.forms.widgets import TextInput, MultiWidget, HiddenInput +from 
django.utils.safestring import mark_safe +from django.utils.translation import ugettext_lazy as _ +from django.core.urlresolvers import reverse +from captcha.models import CaptchaStore +from captcha.conf import settings +from captcha.helpers import * +import datetime + +class CaptchaTextInput(MultiWidget): + def __init__(self,attrs=None): + widgets = ( + HiddenInput(attrs), + TextInput(attrs), + ) + + for key in ('image','hidden_field','text_field'): + if '%%(%s)s'%key not in settings.CAPTCHA_OUTPUT_FORMAT: + raise KeyError('All of %s must be present in your CAPTCHA_OUTPUT_FORMAT setting. Could not find %s' %( + ', '.join(['%%(%s)s'%k for k in ('image','hidden_field','text_field')]), + '%%(%s)s'%key + )) + + super(CaptchaTextInput,self).__init__(widgets,attrs) + + def decompress(self,value): + if value: + return value.split(',') + return [None,None] + + def format_output(self, rendered_widgets): + hidden_field, text_field = rendered_widgets + return settings.CAPTCHA_OUTPUT_FORMAT %dict(image=self.image_and_audio, hidden_field=hidden_field, text_field=text_field) + + def render(self, name, value, attrs=None): + challenge,response= settings.get_challenge()() + + store = CaptchaStore.objects.create(challenge=challenge,response=response) + key = store.hashkey + value = [key, u''] + + self.image_and_audio = 'captcha' %reverse('captcha-image',kwargs=dict(key=key)) + if settings.CAPTCHA_FLITE_PATH: + self.image_and_audio = '%s' %( reverse('captcha-audio', kwargs=dict(key=key)), unicode(_('Play captcha as audio file')), self.image_and_audio) + #fields = super(CaptchaTextInput, self).render(name, value, attrs=attrs) + + return super(CaptchaTextInput, self).render(name, value, attrs=attrs) + +class CaptchaField(MultiValueField): + widget=CaptchaTextInput + + def __init__(self, *args,**kwargs): + fields = ( + CharField(show_hidden_initial=True), + CharField(), + ) + if 'error_messages' not in kwargs or 'invalid' not in kwargs.get('error_messages'): + if 'error_messages' not in kwargs: + kwargs['error_messages'] = dict() + kwargs['error_messages'].update(dict(invalid=_('Invalid CAPTCHA'))) + + + super(CaptchaField,self).__init__(fields=fields, *args, **kwargs) + + def compress(self,data_list): + if data_list: + return ','.join(data_list) + return None + + def clean(self, value): + super(CaptchaField, self).clean(value) + response, value[1] = value[1].strip().lower(), '' + CaptchaStore.remove_expired() + try: + store = CaptchaStore.objects.get(response=response, hashkey=value[0], expiration__gt=datetime.datetime.now()) + store.delete() + except Exception: + raise ValidationError(getattr(self,'error_messages',dict()).get('invalid', _('Invalid CAPTCHA'))) + return value diff --git a/DJAGEN/branches/oguz/djagen/captcha/fonts/COPYRIGHT.TXT b/DJAGEN/branches/oguz/djagen/captcha/fonts/COPYRIGHT.TXT new file mode 100755 index 0000000..e651be1 --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/captcha/fonts/COPYRIGHT.TXT @@ -0,0 +1,124 @@ +Bitstream Vera Fonts Copyright + +The fonts have a generous copyright, allowing derivative works (as +long as "Bitstream" or "Vera" are not in the names), and full +redistribution (so long as they are not *sold* by themselves). They +can be be bundled, redistributed and sold with any software. + +The fonts are distributed under the following copyright: + +Copyright +========= + +Copyright (c) 2003 by Bitstream, Inc. All Rights Reserved. Bitstream +Vera is a trademark of Bitstream, Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining +a copy of the fonts accompanying this license ("Fonts") and associated +documentation files (the "Font Software"), to reproduce and distribute +the Font Software, including without limitation the rights to use, +copy, merge, publish, distribute, and/or sell copies of the Font +Software, and to permit persons to whom the Font Software is furnished +to do so, subject to the following conditions: + +The above copyright and trademark notices and this permission notice +shall be included in all copies of one or more of the Font Software +typefaces. + +The Font Software may be modified, altered, or added to, and in +particular the designs of glyphs or characters in the Fonts may be +modified and additional glyphs or characters may be added to the +Fonts, only if the fonts are renamed to names not containing either +the words "Bitstream" or the word "Vera". + +This License becomes null and void to the extent applicable to Fonts +or Font Software that has been modified and is distributed under the +"Bitstream Vera" names. + +The Font Software may be sold as part of a larger software package but +no copy of one or more of the Font Software typefaces may be sold by +itself. + +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL +BITSTREAM OR THE GNOME FOUNDATION BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, +OR CONSEQUENTIAL DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF THE USE OR INABILITY TO USE THE FONT +SOFTWARE OR FROM OTHER DEALINGS IN THE FONT SOFTWARE. + +Except as contained in this notice, the names of Gnome, the Gnome +Foundation, and Bitstream Inc., shall not be used in advertising or +otherwise to promote the sale, use or other dealings in this Font +Software without prior written authorization from the Gnome Foundation +or Bitstream Inc., respectively. For further information, contact: +fonts at gnome dot org. + +Copyright FAQ +============= + + 1. I don't understand the resale restriction... What gives? + + Bitstream is giving away these fonts, but wishes to ensure its + competitors can't just drop the fonts as is into a font sale system + and sell them as is. It seems fair that if Bitstream can't make money + from the Bitstream Vera fonts, their competitors should not be able to + do so either. You can sell the fonts as part of any software package, + however. + + 2. I want to package these fonts separately for distribution and + sale as part of a larger software package or system. Can I do so? + + Yes. A RPM or Debian package is a "larger software package" to begin + with, and you aren't selling them independently by themselves. + See 1. above. + + 3. Are derivative works allowed? + Yes! + + 4. Can I change or add to the font(s)? + Yes, but you must change the name(s) of the font(s). + + 5. Under what terms are derivative works allowed? + + You must change the name(s) of the fonts. This is to ensure the + quality of the fonts, both to protect Bitstream and Gnome. We want to + ensure that if an application has opened a font specifically of these + names, it gets what it expects (though of course, using fontconfig, + substitutions could still could have occurred during font + opening). 
You must include the Bitstream copyright. Additional + copyrights can be added, as per copyright law. Happy Font Hacking! + + 6. If I have improvements for Bitstream Vera, is it possible they might get + adopted in future versions? + + Yes. The contract between the Gnome Foundation and Bitstream has + provisions for working with Bitstream to ensure quality additions to + the Bitstream Vera font family. Please contact us if you have such + additions. Note, that in general, we will want such additions for the + entire family, not just a single font, and that you'll have to keep + both Gnome and Jim Lyles, Vera's designer, happy! To make sense to add + glyphs to the font, they must be stylistically in keeping with Vera's + design. Vera cannot become a "ransom note" font. Jim Lyles will be + providing a document describing the design elements used in Vera, as a + guide and aid for people interested in contributing to Vera. + + 7. I want to sell a software package that uses these fonts: Can I do so? + + Sure. Bundle the fonts with your software and sell your software + with the fonts. That is the intent of the copyright. + + 8. If applications have built the names "Bitstream Vera" into them, + can I override this somehow to use fonts of my choosing? + + This depends on exact details of the software. Most open source + systems and software (e.g., Gnome, KDE, etc.) are now converting to + use fontconfig (see www.fontconfig.org) to handle font configuration, + selection and substitution; it has provisions for overriding font + names and subsituting alternatives. An example is provided by the + supplied local.conf file, which chooses the family Bitstream Vera for + "sans", "serif" and "monospace". Other software (e.g., the XFree86 + core server) has other mechanisms for font substitution. + diff --git a/DJAGEN/branches/oguz/djagen/captcha/fonts/README.TXT b/DJAGEN/branches/oguz/djagen/captcha/fonts/README.TXT new file mode 100755 index 0000000..0f71795 --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/captcha/fonts/README.TXT @@ -0,0 +1,11 @@ +Contained herin is the Bitstream Vera font family. + +The Copyright information is found in the COPYRIGHT.TXT file (along +with being incoporated into the fonts themselves). + +The releases notes are found in the file "RELEASENOTES.TXT". + +We hope you enjoy Vera! + + Bitstream, Inc. 
+ The Gnome Project diff --git a/DJAGEN/branches/oguz/djagen/captcha/fonts/Vera.ttf b/DJAGEN/branches/oguz/djagen/captcha/fonts/Vera.ttf new file mode 100755 index 0000000..58cd6b5 Binary files /dev/null and b/DJAGEN/branches/oguz/djagen/captcha/fonts/Vera.ttf differ diff --git a/DJAGEN/branches/oguz/djagen/captcha/helpers.py b/DJAGEN/branches/oguz/djagen/captcha/helpers.py new file mode 100755 index 0000000..b400700 --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/captcha/helpers.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +import random +from captcha.conf import settings + +def math_challenge(): + operators = ('+','*','-',) + operands = (random.randint(1,10),random.randint(1,10)) + operator = random.choice(operators) + if operands[0] < operands[1] and '-' == operator: + operands = (operands[1],operands[0]) + challenge = '%d%s%d' %(operands[0],operator,operands[1]) + return u'%s=' %(challenge), unicode(eval(challenge)) + +def random_char_challenge(): + chars,ret = u'abcdefghijklmnopqrstuvwxyz', u'' + for i in range(settings.CAPTCHA_LENGTH): + ret += random.choice(chars) + return ret.upper(),ret + +def unicode_challenge(): + chars,ret = u'äàáëéèïíîöóòüúù', u'' + for i in range(settings.CAPTCHA_LENGTH): + ret += random.choice(chars) + return ret.upper(), ret + +def word_challenge(): + fd = file(settings.CAPTCHA_WORDS_DICTIONARY,'rb') + l = fd.readlines() + fd.close() + while True: + word = random.choice(l).strip() + if len(word) >= settings.CAPTCHA_DICTIONARY_MIN_LENGTH and len(word) <= settings.CAPTCHA_DICTIONARY_MAX_LENGTH: + break + return word.upper(), word.lower() + +def noise_arcs(draw,image): + size = image.size + draw.arc([-20,-20, size[0],20], 0, 295, fill=settings.CAPTCHA_FOREGROUND_COLOR) + draw.line([-20,20, size[0]+20,size[1]-20], fill=settings.CAPTCHA_FOREGROUND_COLOR) + draw.line([-20,0, size[0]+20,size[1]], fill=settings.CAPTCHA_FOREGROUND_COLOR) + return draw + +def noise_dots(draw,image): + size = image.size + for p in range(int(size[0]*size[1]*0.1)): + draw.point((random.randint(0, size[0]),random.randint(0, size[1])), fill=settings.CAPTCHA_FOREGROUND_COLOR ) + return draw + +def post_smooth(image): + import ImageFilter + return image.filter(ImageFilter.SMOOTH) diff --git a/DJAGEN/branches/oguz/djagen/captcha/management/__init__.py b/DJAGEN/branches/oguz/djagen/captcha/management/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/DJAGEN/branches/oguz/djagen/captcha/management/commands/__init__.py b/DJAGEN/branches/oguz/djagen/captcha/management/commands/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/DJAGEN/branches/oguz/djagen/captcha/management/commands/captcha_clean.py b/DJAGEN/branches/oguz/djagen/captcha/management/commands/captcha_clean.py new file mode 100755 index 0000000..9a66e48 --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/captcha/management/commands/captcha_clean.py @@ -0,0 +1,28 @@ +from django.core.management.base import BaseCommand, CommandError +import sys + +from optparse import make_option + +class Command(BaseCommand): + help = "Clean up expired captcha hashkeys." + + def handle(self, **options): + from captcha.models import CaptchaStore + import datetime + verbose = int(options.get('verbosity')) + expired_keys = CaptchaStore.objects.filter(expiration__lte=datetime.datetime.now()).count() + if verbose >= 1: + print "Currently %s expired hashkeys" % expired_keys + try: + CaptchaStore.remove_expired() + except: + if verbose >= 1 : + print "Unable to delete expired hashkeys." 
+ sys.exit(1) + if verbose >= 1: + if expired_keys > 0: + print "Expired hashkeys removed." + else: + print "No keys to remove." + + diff --git a/DJAGEN/branches/oguz/djagen/captcha/models.py b/DJAGEN/branches/oguz/djagen/captcha/models.py new file mode 100755 index 0000000..fc8c599 --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/captcha/models.py @@ -0,0 +1,46 @@ +from django.db import models +from captcha.conf import settings as captcha_settings +import datetime, unicodedata, random, time + +# Heavily based on session key generation in Django +# Use the system (hardware-based) random number generator if it exists. +if hasattr(random, 'SystemRandom'): + randrange = random.SystemRandom().randrange +else: + randrange = random.randrange +MAX_RANDOM_KEY = 18446744073709551616L # 2 << 63 + + +try: + import hashlib # sha for Python 2.5+ +except ImportError: + import sha # sha for Python 2.4 (deprecated in Python 2.6) + hashlib = False + +class CaptchaStore(models.Model): + challenge = models.CharField(blank=False, max_length=32) + response = models.CharField(blank=False, max_length=32) + hashkey = models.CharField(blank=False, max_length=40, unique=True) + expiration = models.DateTimeField(blank=False) + + def save(self,*args,**kwargs): + self.response = self.response.lower() + if not self.expiration: + self.expiration = datetime.datetime.now() + datetime.timedelta(minutes= int(captcha_settings.CAPTCHA_TIMEOUT)) + if not self.hashkey: + key_ = unicodedata.normalize('NFKD', str(randrange(0,MAX_RANDOM_KEY)) + str(time.time()) + unicode(self.challenge)).encode('ascii', 'ignore') + unicodedata.normalize('NFKD', unicode(self.response)).encode('ascii', 'ignore') + if hashlib: + self.hashkey = hashlib.new('sha', key_).hexdigest() + else: + self.hashkey = sha.new(key_).hexdigest() + del(key_) + super(CaptchaStore,self).save(*args,**kwargs) + + def __unicode__(self): + return self.challenge + + + def remove_expired(cls): + cls.objects.filter(expiration__lte=datetime.datetime.now()).delete() + remove_expired = classmethod(remove_expired) + diff --git a/DJAGEN/branches/oguz/djagen/captcha/tests/__init__.py b/DJAGEN/branches/oguz/djagen/captcha/tests/__init__.py new file mode 100755 index 0000000..ded5948 --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/captcha/tests/__init__.py @@ -0,0 +1,153 @@ +# -*- coding: utf-8 -*- +from captcha.conf import settings +from captcha.models import CaptchaStore +from django.core.urlresolvers import reverse +from django.test import TestCase +from django.utils.translation import ugettext_lazy as _ +import datetime + + +class CaptchaCase(TestCase): + urls = 'captcha.tests.urls' + + def setUp(self): + self.default_challenge = settings.get_challenge()() + self.math_challenge = settings._callable_from_string('captcha.helpers.math_challenge')() + self.chars_challenge = settings._callable_from_string('captcha.helpers.random_char_challenge')() + self.unicode_challenge = settings._callable_from_string('captcha.helpers.unicode_challenge')() + + self.default_store, created = CaptchaStore.objects.get_or_create(challenge=self.default_challenge[0],response=self.default_challenge[1]) + self.math_store, created = CaptchaStore.objects.get_or_create(challenge=self.math_challenge[0],response=self.math_challenge[1]) + self.chars_store, created = CaptchaStore.objects.get_or_create(challenge=self.chars_challenge[0],response=self.chars_challenge[1]) + self.unicode_store, created = CaptchaStore.objects.get_or_create(challenge=self.unicode_challenge[0],response=self.unicode_challenge[1]) + + + + + 
def testImages(self): + for key in (self.math_store.hashkey, self.chars_store.hashkey, self.default_store.hashkey, self.unicode_store.hashkey): + response = self.client.get(reverse('captcha-image',kwargs=dict(key=key))) + self.failUnlessEqual(response.status_code, 200) + self.assertTrue(response.has_header('content-type')) + self.assertEquals(response._headers.get('content-type'), ('Content-Type', 'image/png')) + + def testAudio(self): + if not settings.CAPTCHA_FLITE_PATH: + return + for key in (self.math_store.hashkey, self.chars_store.hashkey, self.default_store.hashkey, self.unicode_store.hashkey): + response = self.client.get(reverse('captcha-audio',kwargs=dict(key=key))) + self.failUnlessEqual(response.status_code, 200) + self.assertTrue(len(response.content) > 1024) + self.assertTrue(response.has_header('content-type')) + self.assertEquals(response._headers.get('content-type'), ('Content-Type', 'audio/x-wav')) + + def testFormSubmit(self): + r = self.client.get(reverse('captcha-test')) + self.failUnlessEqual(r.status_code, 200) + hash_ = r.content[r.content.find('value="')+7:r.content.find('value="')+47] + try: + response = CaptchaStore.objects.get(hashkey=hash_).response + except: + self.fail() + + r = self.client.post(reverse('captcha-test'), dict(captcha_0=hash_,captcha_1=response, subject='xxx', sender='asasd@asdasd.com')) + self.failUnlessEqual(r.status_code, 200) + self.assertTrue(r.content.find('Form validated') > 0) + + r = self.client.post(reverse('captcha-test'), dict(captcha_0=hash_,captcha_1=response, subject='xxx', sender='asasd@asdasd.com')) + self.failUnlessEqual(r.status_code, 200) + self.assertFalse(r.content.find('Form validated') > 0) + + + + def testWrongSubmit(self): + r = self.client.get(reverse('captcha-test')) + self.failUnlessEqual(r.status_code, 200) + r = self.client.post(reverse('captcha-test'), dict(captcha_0='abc',captcha_1='wrong response', subject='xxx', sender='asasd@asdasd.com')) + self.assertFormError(r,'form','captcha',_('Invalid CAPTCHA')) + + def testDeleteExpired(self): + self.default_store.expiration = datetime.datetime.now() - datetime.timedelta(minutes=5) + self.default_store.save() + hash_ = self.default_store.hashkey + r = self.client.post(reverse('captcha-test'), dict(captcha_0=hash_,captcha_1=self.default_store.response, subject='xxx', sender='asasd@asdasd.com')) + + self.failUnlessEqual(r.status_code, 200) + self.assertFalse(r.content.find('Form validated') > 0) + + # expired -> deleted + try: + CaptchaStore.objects.get(hashkey=hash_) + self.fail() + except: + pass + + def testCustomErrorMessage(self): + r = self.client.get(reverse('captcha-test-custom-error-message')) + self.failUnlessEqual(r.status_code, 200) + + # Wrong answer + r = self.client.post(reverse('captcha-test-custom-error-message'), dict(captcha_0='abc',captcha_1='wrong response')) + self.assertFormError(r,'form','captcha','TEST CUSTOM ERROR MESSAGE') + # empty answer + r = self.client.post(reverse('captcha-test-custom-error-message'), dict(captcha_0='abc',captcha_1='')) + self.assertFormError(r,'form','captcha',_('This field is required.')) + + def testRepeatedChallenge(self): + store = CaptchaStore.objects.create(challenge='xxx',response='xxx') + try: + store2 = CaptchaStore.objects.create(challenge='xxx',response='xxx') + except Exception: + self.fail() + + + def testRepeatedChallengeFormSubmit(self): + settings.CAPTCHA_CHALLENGE_FUNCT = 'captcha.tests.trivial_challenge' + + r1 = self.client.get(reverse('captcha-test')) + r2 = self.client.get(reverse('captcha-test')) + 
self.failUnlessEqual(r1.status_code, 200) + self.failUnlessEqual(r2.status_code, 200) + hash_1 = r1.content[r1.content.find('value="')+7:r1.content.find('value="')+47] + hash_2 = r2.content[r2.content.find('value="')+7:r2.content.find('value="')+47] + try: + store_1 = CaptchaStore.objects.get(hashkey=hash_1) + store_2 = CaptchaStore.objects.get(hashkey=hash_2) + except: + self.fail() + + self.assertTrue(store_1.pk != store_2.pk) + self.assertTrue(store_1.response == store_2.response) + self.assertTrue(hash_1 != hash_2) + + + + r1 = self.client.post(reverse('captcha-test'), dict(captcha_0=hash_1,captcha_1=store_1.response, subject='xxx', sender='asasd@asdasd.com')) + self.failUnlessEqual(r1.status_code, 200) + self.assertTrue(r1.content.find('Form validated') > 0) + + try: + store_2 = CaptchaStore.objects.get(hashkey=hash_2) + except: + self.fail() + + r2 = self.client.post(reverse('captcha-test'), dict(captcha_0=hash_2,captcha_1=store_2.response, subject='xxx', sender='asasd@asdasd.com')) + self.failUnlessEqual(r2.status_code, 200) + self.assertTrue(r2.content.find('Form validated') > 0) + + def testOutputFormat(self): + settings.CAPTCHA_OUTPUT_FORMAT = u'%(image)s

Hello, captcha world

%(hidden_field)s%(text_field)s' + r = self.client.get(reverse('captcha-test')) + self.failUnlessEqual(r.status_code, 200) + self.assertTrue('

Hello, captcha world

' in r.content) + + def testInvalidOutputFormat(self): + settings.CAPTCHA_OUTPUT_FORMAT = u'%(image)s' + try: + r = self.client.get(reverse('captcha-test')) + self.fail() + except KeyError: + pass + +def trivial_challenge(): + return 'trivial','trivial' diff --git a/DJAGEN/branches/oguz/djagen/captcha/tests/urls.py b/DJAGEN/branches/oguz/djagen/captcha/tests/urls.py new file mode 100755 index 0000000..78b6ee3 --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/captcha/tests/urls.py @@ -0,0 +1,6 @@ +from django.conf.urls.defaults import * +urlpatterns = patterns('', + url(r'test/$','captcha.tests.views.test',name='captcha-test'), + url(r'test2/$','captcha.tests.views.test_custom_error_message',name='captcha-test-custom-error-message'), + url(r'',include('captcha.urls')), +) diff --git a/DJAGEN/branches/oguz/djagen/captcha/tests/views.py b/DJAGEN/branches/oguz/djagen/captcha/tests/views.py new file mode 100755 index 0000000..8b836c1 --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/captcha/tests/views.py @@ -0,0 +1,58 @@ +from django import forms +from captcha.fields import CaptchaField +from django.template import Context, RequestContext, loader +from django.http import HttpResponse + + +TEST_TEMPLATE = r''' + + + + + captcha test + + + {% if passed %} +

Form validated

+ {% endif %} +
+ {{form.as_p}} +

+
+ + +''' + +def test(request): + + class CaptchaTestForm(forms.Form): + subject = forms.CharField(max_length=100) + sender = forms.EmailField() + captcha = CaptchaField(help_text='asdasd') + + if request.POST: + form = CaptchaTestForm(request.POST) + if form.is_valid(): + passed = True + else: + form = CaptchaTestForm() + + t = loader.get_template_from_string(TEST_TEMPLATE) + return HttpResponse(t.render(RequestContext(request, locals()))) + + +def test_custom_error_message(request): + + class CaptchaTestForm(forms.Form): + captcha = CaptchaField(help_text='asdasd', error_messages=dict(invalid='TEST CUSTOM ERROR MESSAGE')) + + if request.POST: + form = CaptchaTestForm(request.POST) + if form.is_valid(): + passed = True + else: + form = CaptchaTestForm() + + t = loader.get_template_from_string(TEST_TEMPLATE) + return HttpResponse(t.render(RequestContext(request, locals()))) diff --git a/DJAGEN/branches/oguz/djagen/captcha/urls.py b/DJAGEN/branches/oguz/djagen/captcha/urls.py new file mode 100755 index 0000000..c458668 --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/captcha/urls.py @@ -0,0 +1,6 @@ +from django.conf.urls.defaults import * + +urlpatterns = patterns('captcha.views', + url(r'image/(?P\w+)/$','captcha_image',name='captcha-image'), + url(r'audio/(?P\w+)/$','captcha_audio',name='captcha-audio'), +) diff --git a/DJAGEN/branches/oguz/djagen/captcha/views.py b/DJAGEN/branches/oguz/djagen/captcha/views.py new file mode 100755 index 0000000..fec51f7 --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/captcha/views.py @@ -0,0 +1,92 @@ +from cStringIO import StringIO +from captcha.models import CaptchaStore +from django.http import HttpResponse, Http404 +from django.shortcuts import get_object_or_404 +import Image,ImageDraw,ImageFont,ImageFilter,random +from captcha.conf import settings +import re + +NON_DIGITS_RX = re.compile('[^\d]') + +def captcha_image(request,key): + store = get_object_or_404(CaptchaStore,hashkey=key) + text=store.challenge + + if settings.CAPTCHA_FONT_PATH.lower().strip().endswith('ttf'): + font = ImageFont.truetype(settings.CAPTCHA_FONT_PATH,settings.CAPTCHA_FONT_SIZE) + else: + font = ImageFont.load(settings.CAPTCHA_FONT_PATH) + + size = font.getsize(text) + size = (size[0]*2,size[1]) + image = Image.new('RGB', size , settings.CAPTCHA_BACKGROUND_COLOR) + + try: + PIL_VERSION = int(NON_DIGITS_RX.sub('',Image.VERSION)) + except: + PIL_VERSION = 116 + + + + xpos = 2 + for char in text: + fgimage = Image.new('RGB', size, settings.CAPTCHA_FOREGROUND_COLOR) + charimage = Image.new('L', font.getsize(' %s '%char), '#000000') + chardraw = ImageDraw.Draw(charimage) + chardraw.text((0,0), ' %s '%char, font=font, fill='#ffffff') + if settings.CAPTCHA_LETTER_ROTATION: + if PIL_VERSION >= 116: + charimage = charimage.rotate(random.randrange( *settings.CAPTCHA_LETTER_ROTATION ), expand=0, resample=Image.BICUBIC) + else: + charimage = charimage.rotate(random.randrange( *settings.CAPTCHA_LETTER_ROTATION ), resample=Image.BICUBIC) + charimage = charimage.crop(charimage.getbbox()) + maskimage = Image.new('L', size) + + maskimage.paste(charimage, (xpos, 4, xpos+charimage.size[0], 4+charimage.size[1] )) + size = maskimage.size + image = Image.composite(fgimage, image, maskimage) + xpos = xpos + 2 + charimage.size[0] + + image = image.crop((0,0,xpos+1,size[1])) + draw = ImageDraw.Draw(image) + + for f in settings.noise_functions(): + draw = f(draw,image) + for f in settings.filter_functions(): + image = f(image) + + out = StringIO() + image.save(out,"PNG") + out.seek(0) + + response = 
HttpResponse() + response['Content-Type'] = 'image/png' + response.write(out.read()) + + return response + +def captcha_audio(request,key): + if settings.CAPTCHA_FLITE_PATH: + store = get_object_or_404(CaptchaStore,hashkey=key) + text=store.challenge + if 'captcha.helpers.math_challenge' == settings.CAPTCHA_CHALLENGE_FUNCT: + text = text.replace('*','times').replace('-','minus') + else: + text = ', '.join(list(text)) + + import tempfile, os + + path = str(os.path.join(tempfile.gettempdir(),'%s.wav' %key)) + cline = '%s -t "%s" -o "%s"' %(settings.CAPTCHA_FLITE_PATH, text, path) + + os.popen(cline).read() + if os.path.isfile(path): + response = HttpResponse() + f = open(path,'rb') + response['Content-Type'] = 'audio/x-wav' + response.write(f.read()) + f.close() + os.unlink(path) + return response + + raise Http404 diff --git a/DJAGEN/branches/oguz/djagen/collector/__init__.py b/DJAGEN/branches/oguz/djagen/collector/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/DJAGEN/branches/oguz/djagen/collector/admin.py b/DJAGEN/branches/oguz/djagen/collector/admin.py new file mode 100755 index 0000000..f6c9e20 --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/collector/admin.py @@ -0,0 +1,74 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +from django.contrib import admin +from djagen.collector.models import * + +from django.conf import settings + +import os +import datetime +import shutil + +from djagen.collector.configini import * + +class AuthorsAdmin (admin.ModelAdmin): + + list_display = ('author_id', 'author_name', 'author_email', 'author_face', 'current_status', 'is_approved', 'label_personal', 'label_lkd', 'label_community', 'label_eng') + list_select_related = True + + search_fields = ['author_name', 'author_surname', 'author_email'] + + def save_model(self, request, obj, form, change): + + #get the values for saving + author_name = obj.author_name + author_surname = obj.author_surname + author_face = obj.author_face + channel_url = obj.channel_url + + current_status = obj.current_status + is_approved = obj.is_approved + + #creating the history + now = datetime.datetime.now() + action_type = current_status + + author_id = obj.author_id + if author_id: + #then this is an update + author = Authors.objects.get(author_id = author_id) + pre_status = author.is_approved + current_status = obj.is_approved + obj.save() + else: + obj.save() + author = Authors.objects.get(author_name=author_name, author_surname=author_surname, channel_url=channel_url) + pre_status = None + current_status = author.is_approved + + author.history_set.create(action_type=action_type, action_date=now, action_owner=request.user.username) + + + #create tmp_config.ini here + handler = Handler(author.author_id) + handler.create_tmp_entries() + + if pre_status != current_status: + a_face = author.author_face + + images_path = os.path.join(settings.MAIN_PATH, 'www', 'images') + heads_path = os.path.join(images_path, 'heads') + face_path = os.path.join(heads_path, a_face) + + tmp_image_path = os.path.join(settinsg.MAIN_PATH, 'temp_ini', a_face) + + if os.path.exits(tmp_image_path): + shutil.move(tmp_image_path, face_path) + +class HistoryAdmin(admin.ModelAdmin): + list_display = ('action_type', 'action_date', 'action_author', 'action_owner') + +admin.site.register(History, HistoryAdmin) +admin.site.register(Authors, AuthorsAdmin) + diff --git a/DJAGEN/branches/oguz/djagen/collector/configini.py b/DJAGEN/branches/oguz/djagen/collector/configini.py new file mode 100755 index 0000000..af4f7ee --- /dev/null +++ 
b/DJAGEN/branches/oguz/djagen/collector/configini.py @@ -0,0 +1,93 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +import os +from django.conf import settings +from djagen.collector.models import * +import ConfigParser + +class Handler: + + def __init__(self, id): + + self.id = id + + self.tmp_entries_ini = os.path.join(settings.MAIN_PATH, 'tmp_ini', 'tmp_entries.ini') + + self.config_entries_ini = os.path.join(settings.MAIN_PATH, 'gezegen', 'config_entries.ini') + + def __set_values(self): + + author = Authors.objects.get(author_id = self.id) + + if not author.is_approved: + return False + + self.name = author.author_name + ' ' + author.author_surname + self.face = author.author_face + self.url = author.channel_url + + labels = {author.label_personal:'Personal', author.label_lkd: 'LKD', author.label_community: 'Community', author.label_eng: 'Eng'} + + label_li = [k for k,v in labels.iteritems() if v==1] + self.author_labels = " ".join(label_li) + + return True + + def create_tmp_entries(self): + + if not self.__set_values(): return + + config_entries = open(self.config_entries_ini) + tmp_entries = open(self.tmp_entries_ini, 'w') + + Config = ConfigParser.ConfigParser() + Config.read(self.config_entries_ini) + sections = Config.sections() + + for section in sections: + + config_name = Config.get(section, 'name') + config_label = Config.get(section, 'label') + config_id = Config.get(section, 'id') + config_url = section + + try: + config_face = Config.get(section, 'face') + except: + config_face = None + + if config_id == self.id: + + url = self.url + face = self.face + name = self.name + label = self.author_labels + id = self.id + + else: + + url = config_url + face = config_face + name = config_name + label = config_label + id = config_id + + s = url + '\n' + s += 'name = ' + name + '\n' + s += 'label = ' + label + '\n' + if face: + s += 'face = ' + face + '\n' + s += 'id = ' + id + '\n' + '\n' + + tmp_entries.write(s) + + tmp_entries.close() + + + + + + + + diff --git a/DJAGEN/branches/oguz/djagen/collector/forms.py b/DJAGEN/branches/oguz/djagen/collector/forms.py new file mode 100755 index 0000000..11a61d8 --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/collector/forms.py @@ -0,0 +1,17 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +from django import forms +from captcha.fields import CaptchaField + +class ContactForm(forms.Form): + + name = forms.CharField(max_length=25, required=True, error_messages={'required': 'Lütfen adınızı giriniz'}, label='Adınız') + surname = forms.CharField(max_length=25, required=True, error_messages={'required': 'Lütfen soyadınızı giriniz'}, label='Soyadınız') + email = forms.EmailField(required=True, error_messages={'required': 'Size ulaşabileceğimiz eposta adresinizi giriniz'}, label='Eposta Adresiniz') + hackergotchi = forms.FileField(required=False, label='Hacketgotchiniz', help_text='Max 80*80 pixellik Gezegende görünmesini istediğiniz fotoğrafınız') + feed = forms.URLField(required=True, label='Besleme adresiniz', help_text='Günlüğünüzün XML kaynağının adresi') + message = forms.CharField(required=False, label='İletişim Mesajınız', widget=forms.widgets.Textarea()) + #field for captcha + captcha = CaptchaField(label="Captcha Alanı", help_text='Gördüğünü karakterleri aynen yazınız', error_messages={'required': 'Hatalı yazdınız!'}) + diff --git a/DJAGEN/branches/oguz/djagen/collector/models.py b/DJAGEN/branches/oguz/djagen/collector/models.py new file mode 100755 index 0000000..eee5269 --- /dev/null +++ 
b/DJAGEN/branches/oguz/djagen/collector/models.py @@ -0,0 +1,111 @@ +from django.db import models +import datetime, unicodedata, random, time +import re + +# Create your models here. +ACTION_CHOICES = ( + (1, u'Removed'), + (2, u'Approved'), + (3, u'Paused'), + (4, u'Readded'), + (5, u'Applied'), + (6, u'Editted') + ) + +class Authors (models.Model): + author_id = models.AutoField(primary_key=True, help_text="Author ID") + author_name = models.CharField(max_length=50, help_text="Author Name") + author_surname = models.CharField(max_length=50, help_text="Author Name") + #we dont keep emails at the config.ini files, this part should be entered at the admin page + author_email = models.EmailField(null=True, blank=True, help_text="Author Email Address") + #the png file name of the author + author_face = models.CharField(max_length=30, null=True, blank=True, help_text="Author Face Name") + channel_subtitle = models.TextField(null=True, blank=True, help_text="Channel Subtitle") + channel_title = models.TextField(null=True, blank=True, help_text="Channel Title") + #URL of the feed. + channel_url = models.URLField(help_text="Channel URL") + #Link to the original format feed + channel_link = models.URLField(null=True, blank=True, help_text="Channel Link") + channel_urlstatus = models.IntegerField(null=True, blank=True, help_text="Channel URL Status") + + #use this field to check whether the author is shown on the planet or not, like banned situations + current_status = models.SmallIntegerField(default=2, choices=ACTION_CHOICES, help_text="Current Status of the Author") + #whether the application to the planet is approved, the approved ones will be shown at the planet + is_approved = models.BooleanField(default=1, help_text="Approve Status of the Author") + + #planets that the channel belongs to + #at the config.ini the entries should be obe of the belows: + #label = Personal + #label = LKD + #label = Eng + #label = Community + label_personal = models.BooleanField(default=1, help_text="Channnels at the Personal Blog Page") + label_lkd = models.BooleanField(default=0, help_text="Channels that are belong to LKD Blogs") + label_community = models.BooleanField(default=0, help_text="Channels that are belong to some community blogs") + label_eng = models.BooleanField(default=0, help_text="Channels that have English entries") + #at the main page, lets just show personal and lkd for now, for communities lets ask them a special rss + + def __unicode__(self): + return u'%s %s' % (self.author_name, self.author_surname) + + class Meta: + #order according to the author_name, ascending + ordering = ['author_name'] + +# keep the history for the action that are done on the member urls +class History (models.Model): + action_type = models.SmallIntegerField(choices=ACTION_CHOICES) + action_date = models.DateTimeField() + action_explanation = models.TextField(help_text="Reason of Action", blank=True, null=True) + action_author = models.ForeignKey('Authors') + action_owner = models.CharField(max_length=20, help_text="The user who did the action") + + def __unicode__(self): + return str(self.action_type) + + class Meta: + #order descending, show the last actions at top + ordering = ['-action_date'] + +class Entries (models.Model): + id_hash = models.CharField(max_length=50, help_text="Hash of the ID", primary_key=True) + title = models.CharField(max_length=150, help_text="Entry Title") + content_html = models.TextField(help_text="Entry Orginal Content") + content_text = models.TextField(help_text="Entry Pure Text 
Content") + summary = models.TextField(help_text="Entry Summary", null=True, blank=True) + link = models.URLField(help_text="Link to Entry") + date = models.DateTimeField(help_text="Date of the entry") + entry_id = models.ForeignKey('Authors') + + def __unicode__(self): + + return self.title + + class Meta: + + ordering = ['-date'] + + + def sanitize(self, data): + p = re.compile(r'<[^<]*?/?>') + return p.sub('', data) + +class RunTime (models.Model): + run_time = models.DateTimeField(help_text="Run time of the planet script", auto_now=True) + + def __unicode__(self): + + return self.run_time + + class Meta: + + ordering = ['-run_time'] + + def get_run_time(self): + + dt = ".".join(map(lambda x: str(x), [self.run_time.day, self.run_time.month, self.run_time.year])) + hm = ":".join(map(lambda x: str(x), [self.run_time.hour, self.run_time.minute])) + + rslt = " ".join([dt, hm]) + return rslt + diff --git a/DJAGEN/branches/oguz/djagen/collector/views.py b/DJAGEN/branches/oguz/djagen/collector/views.py new file mode 100755 index 0000000..73b82b7 --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/collector/views.py @@ -0,0 +1,108 @@ +# -*- coding: utf-8 -*- + +# Create your views here. + +from django.shortcuts import render_to_response +from django.http import HttpResponse +from djagen.collector.models import * +from djagen.collector.forms import ContactForm +from djagen.collector.wrappers import render_response +from django.conf import settings +from django.utils.datastructures import MultiValueDictKeyError +import magic +import os +import datetime, time + +BASE_URL = settings.BASE_URL + + +def main(request): + selected_entries = Entries.objects.select_related() + entries_list1 = selected_entries.filter(entry_id__label_personal = 1) + entries_list2 = selected_entries.filter(entry_id__label_lkd = 1) + entries_list3 = selected_entries.filter(entry_id__label_community = 1) + entries_list = entries_list1 | entries_list2 | entries_list3 + # This setting truncating content which has more than words. 
+ truncate_words = 250 + items_per_page = 25 + + #get the last run time + run_time = RunTime.objects.all()[0] + + #get the last entries' date + last_entry_date = Entries.objects.all()[0].date + day = datetime.timedelta(days=1) + last_date_li = [] + for x in xrange(6): + last_entry_date -= day + last_date_li.append(last_entry_date) + + return render_response(request, 'main/main.html' ,{ + 'entries_list':entries_list, + 'truncate_words':truncate_words, + 'items_per_page':repr(items_per_page), + 'run_time':run_time, + 'BASE_URL': BASE_URL, + 'last_date_li': last_date_li, + }) + +def member_subscribe(request): + if request.method == 'POST': + form = ContactForm(request.POST, request.FILES) + #return HttpResponse(str(request.FILES)) + if form.is_valid(): + human = True + try: + check = handle_uploaded_file(request.FILES['hackergotchi']) + except MultiValueDictKeyError: + check = (False, '') + + #save the author information + if check[0]: + f = request.FILES['hackergotchi'] + + #change the name of the file with the unique name created + f.name = check[1] + + author = Authors(author_name=request.POST['name'], author_surname=request.POST['surname'], author_email=request.POST['email'], channel_url=request.POST['feed'], author_face=f.name, is_approved=0, current_status=5) + else: + author = Authors(author_name=request.POST['name'], author_surname=request.POST['surname'], author_email=request.POST['email'], channel_url=request.POST['feed'], is_approved=0, current_status=5) + author.save() + + #save the history with explanation + author.history_set.create(action_type=5, action_date=datetime.datetime.now(), action_explanation=request.POST['message']) + #send mail part + #fill it here + return render_response(request, 'main/subscribe.html/',{'submit': 'done', 'BASE_URL': BASE_URL}) + else: + form = ContactForm() + return render_response(request, 'main/subscribe.html', {'form': form, 'BASE_URL': BASE_URL}) + +def handle_uploaded_file(f): + + if not f.name: return False + #lets create a unique name for the image + t = str(time.time()).split(".") + img_name = t[0] + t[1].f.name.split(".")[1] + f.name = img_name + path = os.path.join(settings.FILE_UPLOAD_TEMP_DIR, f.name) + + destination = open(path, 'wb+') + for chunk in f.chunks(): + destination.write(chunk) + destination.close() + + m = magic.open(magic.MAGIC_MIME) + m.load() + t = m.file(path) + if t.split('/')[0] == 'image': + return (True, f.name) + else: + os.unlink(path) + return (False, '') + +def list_members(request): + + authors = Authors.objects.all() + + return render_response(request, 'main/members.html', {'members': authors, 'BASE_URL': BASE_URL}) diff --git a/DJAGEN/branches/oguz/djagen/collector/wrappers.py b/DJAGEN/branches/oguz/djagen/collector/wrappers.py new file mode 100755 index 0000000..af35741 --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/collector/wrappers.py @@ -0,0 +1,13 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +from django.shortcuts import render_to_response +from django.template import RequestContext + +def render_response(req, *args, **kwargs): + """ + Wrapper function that automatically adds "context_instance" to render_to_response + """ + + kwargs['context_instance'] = RequestContext(req) + return render_to_response(*args, **kwargs) diff --git a/DJAGEN/branches/oguz/djagen/gezegen/gezegen/__init__.py b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/DJAGEN/branches/oguz/djagen/gezegen/gezegen/atom.xml.tmpl 
b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/atom.xml.tmpl new file mode 100755 index 0000000..c444d01 --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/atom.xml.tmpl @@ -0,0 +1,61 @@ + + + + <TMPL_VAR name> + "/> + "/> + + + + + + xml:lang=""> + xml:lang="<TMPL_VAR title_language>"</TMPL_IF>><TMPL_VAR title ESCAPE="HTML"> + "/> + + + xml:lang=""> + + + + + + + + + + + + + + + + + + + + + <TMPL_VAR channel_title ESCAPE="HTML"> + + <TMPL_VAR channel_name ESCAPE="HTML"> + + + + + "/> + + + + + + + + + + + + + + + + diff --git a/DJAGEN/branches/oguz/djagen/gezegen/gezegen/atom.xml.tmplc b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/atom.xml.tmplc new file mode 100755 index 0000000..b6c89d2 Binary files /dev/null and b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/atom.xml.tmplc differ diff --git a/DJAGEN/branches/oguz/djagen/gezegen/gezegen/config.ini b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/config.ini new file mode 100755 index 0000000..a8b8191 --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/config.ini @@ -0,0 +1,864 @@ +[Planet] +name = Linux Gezegeni +link = http://gezegen.linux.org.tr +owner_name = Gezegen Ekibi +owner_email = gezegen@linux.org.tr +cache_directory = cache +new_feed_items = 1 +log_level = DEBUG +template_files = gezegen/index.html.tmpl gezegen/rss20.xml.tmpl gezegen/rss10.xml.tmpl gezegen/opml.xml.tmpl gezegen/foafroll.xml.tmpl gezegen/sidebar.html.tmpl gezegen/simple.html.tmpl gezegen/feeds.html.tmpl gezegen/atom.xml.tmpl +output_dir = www/ +# items_per_page = 15 +items_per_page = 25 +#days_per_page = 0 +feed_timeout = 20 + +# future_dates = ignore_date +# ignore_in_feed = updated + +encoding = utf-8 +locale = tr_TR.UTF-8 + +date_format = %d %b %Y @ %I:%M %p +#date_format = %B %d, %Y %I:%M %p +new_date_format = %d %B %Y + +[DEFAULT] +facewidth = 64 +faceheight = 64 + + +[http://ahmet.pardusman.org/blog/feed/?cat=2] +name = Ahmet Aygün +face = ahmetaygun.png +nick = aaygun +label = Personal +id = 1 + +#[http://arda.pardusman.org/blog/tag/gezegen/feed/] +#name = Arda Çetin +#face = ardacetin.png +label = Personal +id = 2 + +#12 Nisan 2007'de rss adresi degisti. DG. +#Eskisi : http://cekirdek.pardus.org.tr/~meren/blog/rss.cgi] +[http://cekirdek.pardus.org.tr/~meren/blog/feed/rss/] +name = A. Murat Eren +face = meren.png +nick = meren +label = Personal +id = 3 + +[http://www.ademalpyildiz.com.tr/feed/] +name = Adem Alp Yıldız +nick = ayildiz +label = Personal +id = 4 + +[http://www.erdinc.info/?cat=6&feed=rss2] +name = Ali Erdinç Köroğlu +face = alierdinckoroglu.png +nick = aek +label = Personal +id = 5 + +# Gezegen'de gorugumuz yazisi uzerine cikartildi. 
DG, 12 Nisan 2007 +# http://burkinafasafiso.com/2007/04/12/gezegene-elveda/ +#[http://www.burkinafasafiso.com/category/acik-kaynak/feed/] +#name = Ali Işıngör + +[http://feeds2.feedburner.com/raptiye_linux_gezegeni] +name = Alper Kanat +face = alperkanat.png +nick = akanat +label = Personal +id = 6 + +[http://blog.oguz.biz/category/gezegen/rss2] +name = Alper Oğuz +face = +nick = aoguz +label = Personal +id = 7 + +[http://www.murekkep.org/konu/acik-kaynak-ve-linux/feed] +name = Alper Orus +nick = aorus +label = Personal +id = 8 + +[http://www.alpersomuncu.com/weblog/index.php?/feeds/categories/8-Linux.rss] +name = Alper Somuncu +face = alpersomuncu.png +nick = asomuncu +label = Personal +id = 9 + +[http://armish.linux-sevenler.org/blog/category/gezegen/feed] +name = Arman Aksoy +face = armanaksoy.png +nick = aaksoy +label = Personal +id = 10 + +[http://anilozbek.blogspot.com/feeds/posts/default/-/gnu%2Flinux] +name = Anıl Özbek +nick = aozbek +label = Personal +id = 11 + +# 18.01.2009 removed after asking baris metin, there is no xml feed, is not using this blog anymore +#[http://www.metin.org/gunluk/feed/rss/] +#name = Barış Metin +#face = barismetin.png + +[http://www.tuxworkshop.com/blog/?cat=8] +name = Barış Özyurt +face = barisozyurt.png +nick = bozyurt +label = Personal +id = 12 + +[http://feeds.feedburner.com/canburak-gezegen-linux] +name = Can Burak Çilingir +nick = cbcilingir +label = Personal +id = 13 + +[http://cankavaklioglu.name.tr/guncelgunce/archives/linux/index-rss.xml] +name = Can Kavaklıoğlu +nick = ckavaklioglu +label = Personal +id = 14 + +[http://devador.blogspot.com/feeds/posts/default/-/linux] +name = Ceyhun Alyeşil +nick = calyesil +label = Personal +id = 15 + +[http://blog.gunduz.org/index.php?/feeds/categories/1-OEzguer-Yazlm.rss] +name = Devrim Gündüz +face = devrimgunduz.png +nick = dgunduz +label = Personal +id = 16 + +[http://zzz.fisek.com.tr/seyir-defteri/?feed=rss2&cat=3] +name = Doruk Fişek +face = dorukfisek.png +nick = dfisek +label = Personal +id = 17 + +[http://ekin.fisek.com.tr/blog/wp-rss2.php?cat=5] +name = Ekin Meroğlu +face = ekinmeroglu.png +nick = emeroglu +label = Personal +id = 18 + +#[http://aylinux.blogspot.com/atom.xml] +#name = Emre Karaoğlu + +[http://feeds.feedburner.com/TheUselessJournalV4] +name = Erçin Eker +face = ercineker.png +nick = eeker +label = Personal +id = 19 + +# Ingilizce ygirdiler yazmasindan dolayi cikarildi +#[http://enveraltin.com/blog?flav=rss] +#name = Enver Altın +#nick = ealtin + +# kendi istegi ile gecici sure kaldirildi, gunluk ve host sorunlari yasadigini belirtti +# yeni adresi eklendi 06.10.2009 +[http://www.erhanekici.com/blog/category/gezegen/feed/] +name = Erhan Ekici +nick = eekinci +label = Personal +id = 20 + +#Kendi istedigi uzerine cikarildi 180707 +#[http://cekirdek.pardus.org.tr/~tekman/zangetsu/blog/feed/rss/Linux] +#name = Erkan Tekman +#face = erkantekman.png + +#[http://ileriseviye.org/blog/?feed=rss2] +#name = Emre Sevinç + +#temporarily removed, unavailable +#[http://www.faikuygur.com/blog/feed/?cat=-4] +#name = Faik Uygur +#face = faikuygur.png +#nick = fuygur + +[http://blog.arsln.org/category/gezegen/feed] +name = Fatih Arslan +nick = farslan +label = Personal +id = 21 + +[http://gokdenix.blogspot.com/feeds/posts/default/-/gezegen] +name = Gökdeniz Karadağ +nick = gkaradag +label = Personal +id = 22 + +[http://blog.ratonred.com/tag/gezegen-linux/feed] +name = Gökmen Göksel +face = gokmengoksel.png +nick = ggoksel +label = Personal +id = 23 + 
+[http://blog.gokmengorgen.net/?category_name=pardus-tr&feed=rss2] +name = Gökmen Görgen +face = gokmengorgen.png +nick = ggorgen +label = Personal +id = 24 + +[http://6kere9.com/blag/feed/rss/Genel/] +name = Gürer Özen +face = gurerozen.png +nick = gozen +label = Personal +id = 25 + +[http://www.hakanuygun.com/blog/?feed=atom&cat=13] +name = Hakan Uygun +nick = huygun +label = Personal +id = 26 + +#Istegi uzerine adresi degistirildi, Ingilizce gunlukler kismina gecmek istedigini soyledi 09042009 +#[http://www.huseyinuslu.net/topics/linux/feed] +#name = Hüseyin Uslu +#face = huseyinuslu.png +#nick = huslu + +#03/07/2007 Devrim Vasıtası ile çıkmak istedi +#[http://cekirdek.pardus.org.tr/~ismail/blog/rss.cgi] +#name = İsmail Dönmez +#face = ismaildonmez.png + +[http://www.koray.org/blog/wp-rss2.php?cat=7] +name = Koray Bostancı +nick = kbostanci +label = Personal +id = 27 + +#09/08/2007 tarihinde kendisi silinmesini istedi. +#[http://cekirdek.pardus.org.tr/~loker/zangetsu/blog/feed/rss/Pardus/] + +[http://marenostrum.blogsome.com/category/gezegen/feed/] +name = K. Deniz Öğüt +face = kdenizogut.png +nick = kdenizoguz +label = Personal +id = 28 + +[http://www.blockdiagram.net/blog/rss.xml] +name = Kerem Can Karakaş +nick = kckarakas +label = Personal +id = 29 + +[http://blog.oguz.name.tr/?feed=atom&cat=7] +name = Kaya Oğuz +face = kaya-oguz.png +nick = kayaoguz +label = Personal +id = 30 + +[http://leoman.gen.tr/Gezegen/feed] +name = Levent Yalçın +nick = lyalcin +label = Personal +id = 31 + +[http://blog.corporem.org/?feed=rss2&cat=3] +name = M.Tuğrul Yılmazer +face = tugrulyilmazer.png +nick = tyilmazer +label = Personal +id = 32 + +[http://www.amerikadabirgun.com/category/turkce/linux/feed] +name = Mehmet Büyüközer +nick = mbuyukozer +label = Personal +id = 33 + +[http://yildirim.isadamlari.org/tag/gezegen/feed] +name = Mehmet Salih Yıldırım +face = mehmetsalihyildirim.png +nick = msyildirim +label = Personal +id = 34 + +[http://mhazer.blogspot.com/feeds/posts/default/-/gezegen] +name = Murat Hazer +nick = mhazer +label = Personal +id = 35 + +#12052008 RSS ulasilmiyor +#31102008, rsssini tekrar aktive etti +#[http://mail.kivi.com.tr/blog/wp-rss2.php] +[http://kivi.com.tr/blog/?feed=rss2] +name = Murat Koç +nick = mkoc +label = Personal +id = 36 + +[http://panhaema.com/rss.php?mcat=linux] +name = Murat Sağlam +face = muratsaglam.png +nick = msaglam +label = Personal +id = 37 + +[http://mmakbas.wordpress.com/tag/gezegen/feed/] +name = M.Murat Akbaş +nick = makbas +label = Personal +id = 38 + +#[http://demir.web.tr/blog/atom.php] Atom patladı rss deneyelim +[http://feeds.feedburner.com/ndemirgezegen] +name = Necati Demir +face = necatidemir.png +nick = ndemir +label = Personal +id = 39 + +[http://nyucel.blogspot.com/feeds/posts/default/-/gezegen] +name = Necdet Yücel +face = necdetyucel.png +nick = nyucel +label = Personal +id = 40 + +[http://www.r-3.org/blog/?cat=4&feed=rss2] +name = Nihad Karslı +face = nihadkarsli.png +nick = nkarsli +label = Personal +id = 41 + +[http://www.yalazi.org/index.php/archives/category/gezegen/feed/] +name = Onur Yalazı +face = onuryalazi.png +nick = oyalazi +label = Personal +id = 42 + +[http://feeds.feedburner.com/oguzy-gezegen] +name = Oğuz Yarımtepe +face = oguzyarimtepe.png +nick = oyarimtepe +label = Personal +id = 43 + +[http://bilisimlab.com/blog/rss.php] +name = Ömer Fadıl Usta +nick = ousta +label = Personal +id = 44 + +#1.11.2008 de attığı epostada Ingilizce girdisine karşı Turkce bir adres istememize Ingilizce yazacagini soyledi ve gerekirse cikarin 
dedi +[http://feeds.feedburner.com/pinguar-gezegen] +name = Pınar Yanardağ +face = pinaryanardag.png +nick = pinaryanardag +label = Personal +id = 45 + +[http://nightwalkers.blogspot.com/atom.xml] +name = Serbülent Ünsal +nick = sunsal +label = Personal +id = 46 + +[http://gunluk.lkd.org.tr/category/gezegen/feed/] +name = LKD Gezegen Duyuruları +face = gezegencg.png +nick = gezegencg +label = Personal +id = 47 + +#[http://blogs.lkd.org.tr/seminercg/index.php?/feeds/categories/2-Seminer.rss] +[http://gunluk.lkd.org.tr/category/seminer/feed/] +name = LKD Seminer Duyuruları +face = seminercg.png +nick = seminercg +label = Personal +id = 48 + +[http://serveracim.blogspot.com/feeds/posts/default?alt=rss] +name = Server Acim +face = serveracim.png +nick = sacim +label = Personal +id = 49 + +[http://www.ayder.org/gunluk/?feed=rss2] +name = Sinan Alyürük +nick = salyuruk +label = Personal +id = 50 + +[http://talat.uyarer.com/?feed=rss2] +name= Talat Uyarer +nick = tuyarer +label = Personal +id = 51 + +[http://feeds.feedburner.com/tayfurtaybua] +name= Tayfur Taybuğa +face = tayfurtaybuga.png +nick = ttaybuga +label = Personal +id = 52 + +# Rsssindeki tarih sorunundan dolayı girdisinin gezegende tekrarlı gösteriminden dolayı geçici süre kaldırıldı 17112008 +# kendisinden gelen update ile adresi düzenlendi +[http://tonguc.name/blog/?flav=atom] +name = Tonguç Yumruk +face = tongucyumruk.png +nick = tyumruk +label = Personal +id = 53 + +[http://sehitoglu.web.tr/gunluk/?feed=rss2&cat=12] +name = Onur Tolga Şehitoğlu +nick = osehitoglu +label = Personal +id = 54 + +#12052008 RSS e ulasilmiyor +#[http://ergenoglu.org/blog/?feed=rss2] +#name = Üstün Ergenoğlu + +[http://handlet.blogspot.com/feeds/posts/default?alt=rss] +name = Ümran Kamar +face = umrankamar.png +nick = ukamar +label = Personal +id = 55 + +[http://zembereknlp.blogspot.com/feeds/posts/default?alt=rss] +name = Zemberek NLP +# face = +nick = zemberek +label = Personal +id = 56 + +[http://00101010.info/konu/teknik/index.rss] +name = Recai Oktaş +nick = roktas +label = Personal +id = 57 + +#21052007 Bu adresde kimse yok.. 
+#[http://geekshideout.blogspot.com/feeds/posts/default] +#name = Mehmet Erten + +[http://www.bugunlinux.com/?feed=rss2] +name = Ahmet Yıldız +nick = ayildiz +label = Personal +id = 58 + +#gecici olarak uzaklastirildi kufur ettigi icin +#[http://ish.kodzilla.org/blog/?feed=rss2&cat=4] +#name = İşbaran Akçayır + +[http://feeds.feedburner.com/SerkanLinuxGezegeni] +name = Serkan Altuntaş +nick = saltuntas +label = Personal +id = 59 + +[http://www.furkancaliskan.com/blog/category/gezegen/feed] +name = Furkan Çalışkan +nick = fcaliskan +label = Personal +id = 60 + +[http://eumur.wordpress.com/feed] +name = Umur Erdinç +nick = uerdinc +label = Personal +id = 61 + +#[http://blogs.lkd.org.tr/penguencg/index.php?/feeds/index.rss2] +#name = Penguen-CG +#face = +#email = + +[http://serkank.wordpress.com/category/linux/feed/atom] +name = Serkan Kaba +face = serkankaba.png +nick = skaba +label = Personal +id = 62 + +#[http://blogs.lkd.org.tr/standcg/index.php?/feeds/index.rss2] +#name = Stand + +[http://feeds.feedburner.com/nesimia-gezegen?format=xml] +name = Nesimi Acarca +nick = nacarca +label = Personal +id = 63 + +#Rsssindeki tarih sorunundan dolayı girdisinin gezegende tekrarlı gösteriminden dolayı geçici süre kaldırıldı 17112008 +#Kendisinden gelen yeni adres ile güncellendi +[http://www.soyoz.com/gunce/etiket/linux-gezegeni/feed] +name = Erol Soyöz +nick = esoyoz +label = Personal +id = 64 + +[http://gurcanozturk.com/feed/] +name = Gürcan Öztürk +nick = gurcanozturk +label = Personal +id = 65 + +[http://www.python-tr.com/feed/atom/] +name = Python-TR +nick = python-tr +label = Personal +id = 66 + +#20.08.2009 tarihinde kendi istekleri ile cikarildi +#[http://www.ozgurlukicin.com/rss/haber] +#name = Özgürlükiçin.com +#nick = ozgurlukicin + +[http://gunluk.lkd.org.tr/category/web/feed] +name = LKD Web Çalışma Grubu +nick = webcg +label = Personal +id = 67 + +#temporarily removed, 500 error +#[http://www.bahri.info/category/linux/feed] +#name = Bahri Meriç Canlı +#nick = bahrimeric + +[http://blogs.portakalteknoloji.com/bora/blog/feed/rss/] +name = Bora Güngören +nick = boragungoren +label = Personal +id = 68 + +#010608 gecici sure ile durduruldu +#[http://www.ozgurkaratas.com/index.php/feed/] +#name = Özgür Karataş + +[http://www.kirmizivesiyah.org/index.php/category/gezegen/feed/] +name = Kubilay Onur Güngör +nick = kogungor +label = Personal +id = 69 + +[http://gunluk.lkd.org.tr/category/yk/feed/] +name = LKD YK +nick = lkdyk +label = Personal +id = 70 + +[http://flyeater.wordpress.com/tag/lkd/feed] +name = Deniz Koçak +nick = dkocak +label = Personal +id = 71 + +[http://serkan.feyvi.org/blog/category/debian/feed] +name = Serkan Kenar +nick = skenar +label = Personal +id = 72 + +[http://armuting.blogspot.com/feeds/posts/default/-/lkd_gezegen] +name = Ali Erkan İMREK +nick = aeimrek +label = Personal +id = 73 + +[http://www.lkd.org.tr/news/aggregator/RSS] +name = LKD.org.tr +nick = lkd.org.tr +label = LKD +id = 74 + +[http://gunluk.lkd.org.tr/category/ftp/feed/] +name = LKD FTP Çalışma Grubu +nick = lkdftp +label = LKD +id = 75 + +[http://murattikil.blogspot.com/feeds/posts/default] +name = Murat TİKİL +nick = murattikil +label = Personal +id = 76 + +[http://www.burakdayioglu.net/category/linux/feed] +name = Burak Dayıoğlu +face = burakdayioglu.png +nick = burakdayioglu +label = Personal +id = 77 + +[http://feeds.feedburner.com/PardusLinuxOrgAnaSayfa] +name = Pardus-Linux.org +face = +nick = parduslinux +label = Community +id = 78 + +[http://www.linuxipuclari.com/category/gezegen/feed] +name = 
Linuxipuclari +face = linuxipuclari.png +nick = linuxipuclari +label = Community +id = 79 + +[http://www.ozgurkuru.net/ozgur/category/linuxgezegen/feed/] +name = Özgür Kuru +face = +nick = ozgurkuru +label = Personal +id = 80 + +[http://www.okanakyuz.com/?feed=rss2&cat=17] +name = Okan Akyüz +face = okanakyuz.png +nick = okanakyuz +label = Personal +id = 81 + +[http://gunluk.lkd.org.tr/category/senlik/feed/] +name = LKD Şenlik Çalışma Grubu +nick = lkdsenlik +label = Community +id = 82 + +[http://feeds2.feedburner.com/ekovanci?format=xml] +name = Eren Kovancı +nick = erenkovanci +label = Personal +id = 83 + +[http://www.heartsmagic.net/category/linux/feed/] +name = Serkan Çalış +nick = serkancalis +label = Personal +id = 84 + +[http://siyahsapka.blogspot.com/feeds/posts/default/-/Gezegen?alt=rss] +name = Fatih Özavcı +face = fatihozavci.png +nick = fatihozavci +label = Personal +id = 85 + +[http://gunluk.lkd.org.tr/category/sponsor/feed/] +name = LKD Sponsor Çalışma Grubu +nick = sponsorcg +label = LKD +id = 86 + +[http://gnome.org.tr/index.php?option=com_rss&feed=RSS2.0&no_html=1)] +name = GNOME Türkiye +nick = gnometr +#face = +label = Community +id = 87 + +[http://twitter.com/statuses/user_timeline/23496360.rss] +name = Şenlik Twitter Haberleri +nick = senliktwitter +label = LKD +id = 88 + +[http://ozguryazilim.com/?feed=rss2] +name = Ozguryazilim.com +nick = ozguryazilim +label = Community +id = 89 + +[http://linuxogrenmekistiyorum.com/feed/] +name = Fikret Tozak +nick = fikrettozak +label = Personal +id = 90 + +[http://emrahcom.blogspot.com/feeds/posts/default/-/lkd?alt=rss] +name = Emrah Eryılmaz +nick = emraheryilmaz +label = Personal +id = 91 + +[http://osjunkies.com/blog/author/findik/feed/rss/] +name = FINDIK Projesi +nick = findik +label = Community +id = 92 + +[http://www.samkon.org/?feed=rss2&cat=778] +name = Samed Konak +face = samedkonak.png +nick = samedkonak +label = Personal +id = 93 + +[http://canerblt.wordpress.com/tag/linux/feed] +name = Caner Bulut +nick = canerbulut +label = Personal +id = 94 + +[http://seridarus.blogspot.com/feeds/posts/default/-/gezegen] +name = Serdar Yiğit +nick = serdaryigit +label = Personal +id = 95 + +[http://cemosonmez.blogspot.com/feeds/posts/default/-/gezegen] +name = Cem Sönmez +nick = cemsonmez +label = Personal +id = 96 + +[http://www.teknozat.com/kategori/linux/feed] +name = Ümit Yaşar +nick = umityasar +label = Personal +id = 97 + +[http://blog.akgul.web.tr/?cat=2&feed=rss2] +name= Mustafa Akgül +nick = mustafaakgul +label = Personal +id = 98 + +[http://kapadokyayazilim.com/gunluk/omerakyuz/category/linux/feed/] +name = Ömer Akyüz +nick = omerakyuz +label = Personal +id = 99 + +[http://www.birazkisisel.com/tag/linux-gezegeni/feed/] +name = Hüseyin Berberoğlu +nick = huseyinberberoglu +label = Personal +id = 100 + +[http://www.efeciftci.com/category/gezegen/feed/] +name = Efe Çiftci +face = efeciftci.png +nick = efeciftci +label = Personal +id = 101 + +[http://ozgurmurat.blogspot.com/feeds/posts/default/-/lkd_gezegen] +name = Özgür Murat Homurlu +nick = ozgurmurat +label = Personal +id = 102 + +# title ve duzgun yazma sorunlarindan dolayi gecici sure kaldirildi +#[http://opensusetr.wordpress.com/category/gezegen/feed/] +[http://pardusever.blogspot.com/feeds/posts/default/-/gezegen] +name = Emre Can Şüşter +face = emrecansuster.png +nick = emrecan +label = Personal +id = 103 + +[http://ilkinbalkanay.blogspot.com/feeds/posts/default/-/Gezegen] +name = İlkin Ulas Balkanay +face = ilkinulas.png +nick = ilkinulas +label = Personal +id = 104 
+ +[http://kubilaykocabalkan.wordpress.com/tag/pardus/feed/] +name = Kubilay Kocabalkan +nick = kubilaykocabalkan +label = Personal +id = 105 + +[http://www.syslogs.org/feed/] +name = Cagri Ersen +nick = cagriersen +label = Personal +id = 106 + +[http://onuraslan.com/blog/etiket/gezegen/feed/] +name = Onur Aslan +nick = onuraslan +face = onuraslan.png +label = Personal +id = 107 + +[http://ercankuru.com.tr/index/category/gezegen/lkd-gezegeni/feed/] +name = Ercan Kuru +nick = ercankuru +label = Personal +id = 108 + +[http://www.bayramkaragoz.org/category/gezegen/feed/] +name = Bayram Karagöz +nick = bayramkaragoz +face = bayramkaragoz.png +label = Personal +id = 109 + +[http://gungorbasa.blogspot.com/feeds/posts/default/-/Gezegen] +name = Güngör Basa +nick = gungorbasa +label = Personal +id = 110 + +[http://www.sinanonur.com/konu/linuxgezegen/feed/] +name = Sinan Onur Altınuç +nick = sinanonur +face = sinanonur.png +label = Personal +id = 111 + +[http://blog.halid.org/tag/linux/feed/] +name = Halid Said Altuner +nick = halidaltuner +label = Personal +id = 112 + +[http://gunluk.lyildirim.net/etiket/gezegen/feed/] +name = Levent Yıldırım +nick = lyildirim +label = Personal +id = 113 + +[http://can.logikit.net/tag/yazilim/feed/] +name = Can İnce +nick = canince +face = canince.png +label = Personal +id = 114 + +[http://mkarakaplan.wordpress.com/category/gezegen/feed/] +name = Mustafa Karakaplan +nick = mustafakarakaplan +label = Personal +id = 115 diff --git a/DJAGEN/branches/oguz/djagen/gezegen/gezegen/config_entries.ini b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/config_entries.ini new file mode 100755 index 0000000..a489912 --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/config_entries.ini @@ -0,0 +1,834 @@ +[http://ahmet.pardusman.org/blog/feed/?cat=2] +name = Ahmet Aygün +face = ahmetaygun.png +nick = aaygun +label = Personal +id = 1 + +#[http://arda.pardusman.org/blog/tag/gezegen/feed/] +#name = Arda Çetin +#face = ardacetin.png +label = Personal +id = 2 + +#12 Nisan 2007'de rss adresi degisti. DG. +#Eskisi : http://cekirdek.pardus.org.tr/~meren/blog/rss.cgi] +[http://cekirdek.pardus.org.tr/~meren/blog/feed/rss/] +name = A. Murat Eren +face = meren.png +nick = meren +label = Personal +id = 3 + +[http://www.ademalpyildiz.com.tr/feed/] +name = Adem Alp Yıldız +nick = ayildiz +label = Personal +id = 4 + +[http://www.erdinc.info/?cat=6&feed=rss2] +name = Ali Erdinç Köroğlu +face = alierdinckoroglu.png +nick = aek +label = Personal +id = 5 + +# Gezegen'de gorugumuz yazisi uzerine cikartildi. 
DG, 12 Nisan 2007 +# http://burkinafasafiso.com/2007/04/12/gezegene-elveda/ +#[http://www.burkinafasafiso.com/category/acik-kaynak/feed/] +#name = Ali Işıngör + +[http://feeds2.feedburner.com/raptiye_linux_gezegeni] +name = Alper Kanat +face = alperkanat.png +nick = akanat +label = Personal +id = 6 + +[http://blog.oguz.biz/category/gezegen/rss2] +name = Alper Oğuz +face = +nick = aoguz +label = Personal +id = 7 + +[http://www.murekkep.org/konu/acik-kaynak-ve-linux/feed] +name = Alper Orus +nick = aorus +label = Personal +id = 8 + +[http://www.alpersomuncu.com/weblog/index.php?/feeds/categories/8-Linux.rss] +name = Alper Somuncu +face = alpersomuncu.png +nick = asomuncu +label = Personal +id = 9 + +[http://armish.linux-sevenler.org/blog/category/gezegen/feed] +name = Arman Aksoy +face = armanaksoy.png +nick = aaksoy +label = Personal +id = 10 + +[http://anilozbek.blogspot.com/feeds/posts/default/-/gnu%2Flinux] +name = Anıl Özbek +nick = aozbek +label = Personal +id = 11 + +# 18.01.2009 removed after asking baris metin, there is no xml feed, is not using this blog anymore +#[http://www.metin.org/gunluk/feed/rss/] +#name = Barış Metin +#face = barismetin.png + +[http://www.tuxworkshop.com/blog/?cat=8] +name = Barış Özyurt +face = barisozyurt.png +nick = bozyurt +label = Personal +id = 12 + +[http://feeds.feedburner.com/canburak-gezegen-linux] +name = Can Burak Çilingir +nick = cbcilingir +label = Personal +id = 13 + +[http://cankavaklioglu.name.tr/guncelgunce/archives/linux/index-rss.xml] +name = Can Kavaklıoğlu +nick = ckavaklioglu +label = Personal +id = 14 + +[http://devador.blogspot.com/feeds/posts/default/-/linux] +name = Ceyhun Alyeşil +nick = calyesil +label = Personal +id = 15 + +[http://blog.gunduz.org/index.php?/feeds/categories/1-OEzguer-Yazlm.rss] +name = Devrim Gündüz +face = devrimgunduz.png +nick = dgunduz +label = Personal +id = 16 + +[http://zzz.fisek.com.tr/seyir-defteri/?feed=rss2&cat=3] +name = Doruk Fişek +face = dorukfisek.png +nick = dfisek +label = Personal +id = 17 + +[http://ekin.fisek.com.tr/blog/wp-rss2.php?cat=5] +name = Ekin Meroğlu +face = ekinmeroglu.png +nick = emeroglu +label = Personal +id = 18 + +#[http://aylinux.blogspot.com/atom.xml] +#name = Emre Karaoğlu + +[http://feeds.feedburner.com/TheUselessJournalV4] +name = Erçin Eker +face = ercineker.png +nick = eeker +label = Personal +id = 19 + +# Ingilizce ygirdiler yazmasindan dolayi cikarildi +#[http://enveraltin.com/blog?flav=rss] +#name = Enver Altın +#nick = ealtin + +# kendi istegi ile gecici sure kaldirildi, gunluk ve host sorunlari yasadigini belirtti +# yeni adresi eklendi 06.10.2009 +[http://www.erhanekici.com/blog/category/gezegen/feed/] +name = Erhan Ekici +nick = eekinci +label = Personal +id = 20 + +#Kendi istedigi uzerine cikarildi 180707 +#[http://cekirdek.pardus.org.tr/~tekman/zangetsu/blog/feed/rss/Linux] +#name = Erkan Tekman +#face = erkantekman.png + +#[http://ileriseviye.org/blog/?feed=rss2] +#name = Emre Sevinç + +#temporarily removed, unavailable +#[http://www.faikuygur.com/blog/feed/?cat=-4] +#name = Faik Uygur +#face = faikuygur.png +#nick = fuygur + +[http://blog.arsln.org/category/gezegen/feed] +name = Fatih Arslan +nick = farslan +label = Personal +id = 21 + +[http://gokdenix.blogspot.com/feeds/posts/default/-/gezegen] +name = Gökdeniz Karadağ +nick = gkaradag +label = Personal +id = 22 + +[http://blog.ratonred.com/tag/gezegen-linux/feed] +name = Gökmen Göksel +face = gokmengoksel.png +nick = ggoksel +label = Personal +id = 23 + 
+[http://blog.gokmengorgen.net/?category_name=pardus-tr&feed=rss2] +name = Gökmen Görgen +face = gokmengorgen.png +nick = ggorgen +label = Personal +id = 24 + +[http://6kere9.com/blag/feed/rss/Genel/] +name = Gürer Özen +face = gurerozen.png +nick = gozen +label = Personal +id = 25 + +[http://www.hakanuygun.com/blog/?feed=atom&cat=13] +name = Hakan Uygun +nick = huygun +label = Personal +id = 26 + +#Istegi uzerine adresi degistirildi, Ingilizce gunlukler kismina gecmek istedigini soyledi 09042009 +#[http://www.huseyinuslu.net/topics/linux/feed] +#name = Hüseyin Uslu +#face = huseyinuslu.png +#nick = huslu + +#03/07/2007 Devrim Vasıtası ile çıkmak istedi +#[http://cekirdek.pardus.org.tr/~ismail/blog/rss.cgi] +#name = İsmail Dönmez +#face = ismaildonmez.png + +[http://www.koray.org/blog/wp-rss2.php?cat=7] +name = Koray Bostancı +nick = kbostanci +label = Personal +id = 27 + +#09/08/2007 tarihinde kendisi silinmesini istedi. +#[http://cekirdek.pardus.org.tr/~loker/zangetsu/blog/feed/rss/Pardus/] + +[http://marenostrum.blogsome.com/category/gezegen/feed/] +name = K. Deniz Öğüt +face = kdenizogut.png +nick = kdenizoguz +label = Personal +id = 28 + +[http://www.blockdiagram.net/blog/rss.xml] +name = Kerem Can Karakaş +nick = kckarakas +label = Personal +id = 29 + +[http://blog.oguz.name.tr/?feed=atom&cat=7] +name = Kaya Oğuz +face = kaya-oguz.png +nick = kayaoguz +label = Personal +id = 30 + +[http://leoman.gen.tr/Gezegen/feed] +name = Levent Yalçın +nick = lyalcin +label = Personal +id = 31 + +[http://blog.corporem.org/?feed=rss2&cat=3] +name = M.Tuğrul Yılmazer +face = tugrulyilmazer.png +nick = tyilmazer +label = Personal +id = 32 + +[http://www.amerikadabirgun.com/category/turkce/linux/feed] +name = Mehmet Büyüközer +nick = mbuyukozer +label = Personal +id = 33 + +[http://yildirim.isadamlari.org/tag/gezegen/feed] +name = Mehmet Salih Yıldırım +face = mehmetsalihyildirim.png +nick = msyildirim +label = Personal +id = 34 + +[http://mhazer.blogspot.com/feeds/posts/default/-/gezegen] +name = Murat Hazer +nick = mhazer +label = Personal +id = 35 + +#12052008 RSS ulasilmiyor +#31102008, rsssini tekrar aktive etti +#[http://mail.kivi.com.tr/blog/wp-rss2.php] +[http://kivi.com.tr/blog/?feed=rss2] +name = Murat Koç +nick = mkoc +label = Personal +id = 36 + +[http://panhaema.com/rss.php?mcat=linux] +name = Murat Sağlam +face = muratsaglam.png +nick = msaglam +label = Personal +id = 37 + +[http://mmakbas.wordpress.com/tag/gezegen/feed/] +name = M.Murat Akbaş +nick = makbas +label = Personal +id = 38 + +#[http://demir.web.tr/blog/atom.php] Atom patladı rss deneyelim +[http://feeds.feedburner.com/ndemirgezegen] +name = Necati Demir +face = necatidemir.png +nick = ndemir +label = Personal +id = 39 + +[http://nyucel.blogspot.com/feeds/posts/default/-/gezegen] +name = Necdet Yücel +face = necdetyucel.png +nick = nyucel +label = Personal +id = 40 + +[http://www.r-3.org/blog/?cat=4&feed=rss2] +name = Nihad Karslı +face = nihadkarsli.png +nick = nkarsli +label = Personal +id = 41 + +[http://www.yalazi.org/index.php/archives/category/gezegen/feed/] +name = Onur Yalazı +face = onuryalazi.png +nick = oyalazi +label = Personal +id = 42 + +[http://feeds.feedburner.com/oguzy-gezegen] +name = Oğuz Yarımtepe +face = oguzyarimtepe.png +nick = oyarimtepe +label = Personal +id = 43 + +[http://bilisimlab.com/blog/rss.php] +name = Ömer Fadıl Usta +nick = ousta +label = Personal +id = 44 + +#1.11.2008 de attığı epostada Ingilizce girdisine karşı Turkce bir adres istememize Ingilizce yazacagini soyledi ve gerekirse cikarin 
dedi +[http://feeds.feedburner.com/pinguar-gezegen] +name = Pınar Yanardağ +face = pinaryanardag.png +nick = pinaryanardag +label = Personal +id = 45 + +[http://nightwalkers.blogspot.com/atom.xml] +name = Serbülent Ünsal +nick = sunsal +label = Personal +id = 46 + +[http://gunluk.lkd.org.tr/category/gezegen/feed/] +name = LKD Gezegen Duyuruları +face = gezegencg.png +nick = gezegencg +label = Personal +id = 47 + +#[http://blogs.lkd.org.tr/seminercg/index.php?/feeds/categories/2-Seminer.rss] +[http://gunluk.lkd.org.tr/category/seminer/feed/] +name = LKD Seminer Duyuruları +face = seminercg.png +nick = seminercg +label = Personal +id = 48 + +[http://serveracim.blogspot.com/feeds/posts/default?alt=rss] +name = Server Acim +face = serveracim.png +nick = sacim +label = Personal +id = 49 + +[http://www.ayder.org/gunluk/?feed=rss2] +name = Sinan Alyürük +nick = salyuruk +label = Personal +id = 50 + +[http://talat.uyarer.com/?feed=rss2] +name= Talat Uyarer +nick = tuyarer +label = Personal +id = 51 + +[http://feeds.feedburner.com/tayfurtaybua] +name= Tayfur Taybuğa +face = tayfurtaybuga.png +nick = ttaybuga +label = Personal +id = 52 + +# Rsssindeki tarih sorunundan dolayı girdisinin gezegende tekrarlı gösteriminden dolayı geçici süre kaldırıldı 17112008 +# kendisinden gelen update ile adresi düzenlendi +[http://tonguc.name/blog/?flav=atom] +name = Tonguç Yumruk +face = tongucyumruk.png +nick = tyumruk +label = Personal +id = 53 + +[http://sehitoglu.web.tr/gunluk/?feed=rss2&cat=12] +name = Onur Tolga Şehitoğlu +nick = osehitoglu +label = Personal +id = 54 + +#12052008 RSS e ulasilmiyor +#[http://ergenoglu.org/blog/?feed=rss2] +#name = Üstün Ergenoğlu + +[http://handlet.blogspot.com/feeds/posts/default?alt=rss] +name = Ümran Kamar +face = umrankamar.png +nick = ukamar +label = Personal +id = 55 + +[http://zembereknlp.blogspot.com/feeds/posts/default?alt=rss] +name = Zemberek NLP +# face = +nick = zemberek +label = Personal +id = 56 + +[http://00101010.info/konu/teknik/index.rss] +name = Recai Oktaş +nick = roktas +label = Personal +id = 57 + +#21052007 Bu adresde kimse yok.. 
+#[http://geekshideout.blogspot.com/feeds/posts/default] +#name = Mehmet Erten + +[http://www.bugunlinux.com/?feed=rss2] +name = Ahmet Yıldız +nick = ayildiz +label = Personal +id = 58 + +#gecici olarak uzaklastirildi kufur ettigi icin +#[http://ish.kodzilla.org/blog/?feed=rss2&cat=4] +#name = İşbaran Akçayır + +[http://feeds.feedburner.com/SerkanLinuxGezegeni] +name = Serkan Altuntaş +nick = saltuntas +label = Personal +id = 59 + +[http://www.furkancaliskan.com/blog/category/gezegen/feed] +name = Furkan Çalışkan +nick = fcaliskan +label = Personal +id = 60 + +[http://eumur.wordpress.com/feed] +name = Umur Erdinç +nick = uerdinc +label = Personal +id = 61 + +#[http://blogs.lkd.org.tr/penguencg/index.php?/feeds/index.rss2] +#name = Penguen-CG +#face = +#email = + +[http://serkank.wordpress.com/category/linux/feed/atom] +name = Serkan Kaba +face = serkankaba.png +nick = skaba +label = Personal +id = 62 + +#[http://blogs.lkd.org.tr/standcg/index.php?/feeds/index.rss2] +#name = Stand + +[http://feeds.feedburner.com/nesimia-gezegen?format=xml] +name = Nesimi Acarca +nick = nacarca +label = Personal +id = 63 + +#Rsssindeki tarih sorunundan dolayı girdisinin gezegende tekrarlı gösteriminden dolayı geçici süre kaldırıldı 17112008 +#Kendisinden gelen yeni adres ile güncellendi +[http://www.soyoz.com/gunce/etiket/linux-gezegeni/feed] +name = Erol Soyöz +nick = esoyoz +label = Personal +id = 64 + +[http://gurcanozturk.com/feed/] +name = Gürcan Öztürk +nick = gurcanozturk +label = Personal +id = 65 + +[http://www.python-tr.com/feed/atom/] +name = Python-TR +nick = python-tr +label = Personal +id = 66 + +#20.08.2009 tarihinde kendi istekleri ile cikarildi +#[http://www.ozgurlukicin.com/rss/haber] +#name = Özgürlükiçin.com +#nick = ozgurlukicin + +[http://gunluk.lkd.org.tr/category/web/feed] +name = LKD Web Çalışma Grubu +nick = webcg +label = Personal +id = 67 + +#temporarily removed, 500 error +#[http://www.bahri.info/category/linux/feed] +#name = Bahri Meriç Canlı +#nick = bahrimeric + +[http://blogs.portakalteknoloji.com/bora/blog/feed/rss/] +name = Bora Güngören +nick = boragungoren +label = Personal +id = 68 + +#010608 gecici sure ile durduruldu +#[http://www.ozgurkaratas.com/index.php/feed/] +#name = Özgür Karataş + +[http://www.kirmizivesiyah.org/index.php/category/gezegen/feed/] +name = Kubilay Onur Güngör +nick = kogungor +label = Personal +id = 69 + +[http://gunluk.lkd.org.tr/category/yk/feed/] +name = LKD YK +nick = lkdyk +label = Personal +id = 70 + +[http://flyeater.wordpress.com/tag/lkd/feed] +name = Deniz Koçak +nick = dkocak +label = Personal +id = 71 + +[http://serkan.feyvi.org/blog/category/debian/feed] +name = Serkan Kenar +nick = skenar +label = Personal +id = 72 + +[http://armuting.blogspot.com/feeds/posts/default/-/lkd_gezegen] +name = Ali Erkan İMREK +nick = aeimrek +label = Personal +id = 73 + +[http://www.lkd.org.tr/news/aggregator/RSS] +name = LKD.org.tr +nick = lkd.org.tr +label = LKD +id = 74 + +[http://gunluk.lkd.org.tr/category/ftp/feed/] +name = LKD FTP Çalışma Grubu +nick = lkdftp +label = LKD +id = 75 + +[http://murattikil.blogspot.com/feeds/posts/default] +name = Murat TİKİL +nick = murattikil +label = Personal +id = 76 + +[http://www.burakdayioglu.net/category/linux/feed] +name = Burak Dayıoğlu +face = burakdayioglu.png +nick = burakdayioglu +label = Personal +id = 77 + +[http://feeds.feedburner.com/PardusLinuxOrgAnaSayfa] +name = Pardus-Linux.org +face = +nick = parduslinux +label = Community +id = 78 + +[http://www.linuxipuclari.com/category/gezegen/feed] +name = 
Linuxipuclari +face = linuxipuclari.png +nick = linuxipuclari +label = Community +id = 79 + +[http://www.ozgurkuru.net/ozgur/category/linuxgezegen/feed/] +name = Özgür Kuru +face = +nick = ozgurkuru +label = Personal +id = 80 + +[http://www.okanakyuz.com/?feed=rss2&cat=17] +name = Okan Akyüz +face = okanakyuz.png +nick = okanakyuz +label = Personal +id = 81 + +[http://gunluk.lkd.org.tr/category/senlik/feed/] +name = LKD Şenlik Çalışma Grubu +nick = lkdsenlik +label = Community +id = 82 + +[http://feeds2.feedburner.com/ekovanci?format=xml] +name = Eren Kovancı +nick = erenkovanci +label = Personal +id = 83 + +[http://www.heartsmagic.net/category/linux/feed/] +name = Serkan Çalış +nick = serkancalis +label = Personal +id = 84 + +[http://siyahsapka.blogspot.com/feeds/posts/default/-/Gezegen?alt=rss] +name = Fatih Özavcı +face = fatihozavci.png +nick = fatihozavci +label = Personal +id = 85 + +[http://gunluk.lkd.org.tr/category/sponsor/feed/] +name = LKD Sponsor Çalışma Grubu +nick = sponsorcg +label = LKD +id = 86 + +[http://gnome.org.tr/index.php?option=com_rss&feed=RSS2.0&no_html=1)] +name = GNOME Türkiye +nick = gnometr +#face = +label = Community +id = 87 + +[http://twitter.com/statuses/user_timeline/23496360.rss] +name = Şenlik Twitter Haberleri +nick = senliktwitter +label = LKD +id = 88 + +[http://ozguryazilim.com/?feed=rss2] +name = Ozguryazilim.com +nick = ozguryazilim +label = Community +id = 89 + +[http://linuxogrenmekistiyorum.com/feed/] +name = Fikret Tozak +nick = fikrettozak +label = Personal +id = 90 + +[http://emrahcom.blogspot.com/feeds/posts/default/-/lkd?alt=rss] +name = Emrah Eryılmaz +nick = emraheryilmaz +label = Personal +id = 91 + +[http://osjunkies.com/blog/author/findik/feed/rss/] +name = FINDIK Projesi +nick = findik +label = Community +id = 92 + +[http://www.samkon.org/?feed=rss2&cat=778] +name = Samed Konak +face = samedkonak.png +nick = samedkonak +label = Personal +id = 93 + +[http://canerblt.wordpress.com/tag/linux/feed] +name = Caner Bulut +nick = canerbulut +label = Personal +id = 94 + +[http://seridarus.blogspot.com/feeds/posts/default/-/gezegen] +name = Serdar Yiğit +nick = serdaryigit +label = Personal +id = 95 + +[http://cemosonmez.blogspot.com/feeds/posts/default/-/gezegen] +name = Cem Sönmez +nick = cemsonmez +label = Personal +id = 96 + +[http://www.teknozat.com/kategori/linux/feed] +name = Ümit Yaşar +nick = umityasar +label = Personal +id = 97 + +[http://blog.akgul.web.tr/?cat=2&feed=rss2] +name= Mustafa Akgül +nick = mustafaakgul +label = Personal +id = 98 + +[http://kapadokyayazilim.com/gunluk/omerakyuz/category/linux/feed/] +name = Ömer Akyüz +nick = omerakyuz +label = Personal +id = 99 + +[http://www.birazkisisel.com/tag/linux-gezegeni/feed/] +name = Hüseyin Berberoğlu +nick = huseyinberberoglu +label = Personal +id = 100 + +[http://www.efeciftci.com/category/gezegen/feed/] +name = Efe Çiftci +face = efeciftci.png +nick = efeciftci +label = Personal +id = 101 + +[http://ozgurmurat.blogspot.com/feeds/posts/default/-/lkd_gezegen] +name = Özgür Murat Homurlu +nick = ozgurmurat +label = Personal +id = 102 + +# title ve duzgun yazma sorunlarindan dolayi gecici sure kaldirildi +#[http://opensusetr.wordpress.com/category/gezegen/feed/] +[http://pardusever.blogspot.com/feeds/posts/default/-/gezegen] +name = Emre Can Şüşter +face = emrecansuster.png +nick = emrecan +label = Personal +id = 103 + +[http://ilkinbalkanay.blogspot.com/feeds/posts/default/-/Gezegen] +name = İlkin Ulas Balkanay +face = ilkinulas.png +nick = ilkinulas +label = Personal +id = 104 
+ +[http://kubilaykocabalkan.wordpress.com/tag/pardus/feed/] +name = Kubilay Kocabalkan +nick = kubilaykocabalkan +label = Personal +id = 105 + +[http://www.syslogs.org/feed/] +name = Cagri Ersen +nick = cagriersen +label = Personal +id = 106 + +[http://onuraslan.com/blog/etiket/gezegen/feed/] +name = Onur Aslan +nick = onuraslan +face = onuraslan.png +label = Personal +id = 107 + +[http://ercankuru.com.tr/index/category/gezegen/lkd-gezegeni/feed/] +name = Ercan Kuru +nick = ercankuru +label = Personal +id = 108 + +[http://www.bayramkaragoz.org/category/gezegen/feed/] +name = Bayram Karagöz +nick = bayramkaragoz +face = bayramkaragoz.png +label = Personal +id = 109 + +[http://gungorbasa.blogspot.com/feeds/posts/default/-/Gezegen] +name = Güngör Basa +nick = gungorbasa +label = Personal +id = 110 + +[http://www.sinanonur.com/konu/linuxgezegen/feed/] +name = Sinan Onur Altınuç +nick = sinanonur +face = sinanonur.png +label = Personal +id = 111 + +[http://blog.halid.org/tag/linux/feed/] +name = Halid Said Altuner +nick = halidaltuner +label = Personal +id = 112 + +[http://gunluk.lyildirim.net/etiket/gezegen/feed/] +name = Levent Yıldırım +nick = lyildirim +label = Personal +id = 113 + +[http://can.logikit.net/tag/yazilim/feed/] +name = Can İnce +nick = canince +face = canince.png +label = Personal +id = 114 + +[http://mkarakaplan.wordpress.com/category/gezegen/feed/] +name = Mustafa Karakaplan +nick = mustafakarakaplan +label = Personal +id = 115 diff --git a/DJAGEN/branches/oguz/djagen/gezegen/gezegen/config_entries.xml b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/config_entries.xml new file mode 100755 index 0000000..f9848a4 --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/config_entries.xml @@ -0,0 +1,17 @@ + + + [http://www.bugunlinux.com/?feed=rss2] + Ahmet Yıldız + ayildiz + + 1 + + + + [http://www.bugunlinux.com/?feed=rss3] + Ahmet Yıldızz + ayildizz + + 2 + + diff --git a/DJAGEN/branches/oguz/djagen/gezegen/gezegen/config_header.ini b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/config_header.ini new file mode 100755 index 0000000..b1fcdab --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/config_header.ini @@ -0,0 +1,28 @@ +[Planet] +name = Linux Gezegeni +link = http://gezegen.linux.org.tr +owner_name = Gezegen Ekibi +owner_email = gezegen@linux.org.tr +cache_directory = cache +new_feed_items = 1 +log_level = DEBUG +template_files = gezegen/index.html.tmpl gezegen/rss20.xml.tmpl gezegen/rss10.xml.tmpl gezegen/opml.xml.tmpl gezegen/foafroll.xml.tmpl gezegen/sidebar.html.tmpl gezegen/simple.html.tmpl gezegen/feeds.html.tmpl gezegen/atom.xml.tmpl +output_dir = www/ +# items_per_page = 15 +items_per_page = 25 +#days_per_page = 0 +feed_timeout = 20 + +# future_dates = ignore_date +# ignore_in_feed = updated + +encoding = utf-8 +locale = tr_TR.UTF-8 + +date_format = %d %b %Y @ %I:%M %p +#date_format = %B %d, %Y %I:%M %p +new_date_format = %d %B %Y + +[DEFAULT] +facewidth = 64 +faceheight = 64 diff --git a/DJAGEN/branches/oguz/djagen/gezegen/gezegen/config_header.xml b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/config_header.xml new file mode 100755 index 0000000..949e8cf --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/config_header.xml @@ -0,0 +1,28 @@ + +
+ [config_header.xml body, 28 lines: an XML mirror of config_header.ini above whose element markup was lost in extraction. The surviving values match the ini exactly: name Linux Gezegeni, link http://gezegen.linux.org.tr, owner Gezegen Ekibi / gezegen@linux.org.tr, cache directory "cache", 1 new feed item, log level DEBUG, the same nine gezegen/*.tmpl template files, output_dir www/, items_per_page 25, feed_timeout 20, encoding utf-8, locale tr_TR.UTF-8, the two date formats, and a [DEFAULT] face size of 64x64.]
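These ini files are consumed with the stock Python 2 ConfigParser: [Planet] holds the planet-wide settings and every other section is one subscription keyed by its feed URL, which is exactly how planet.py further below iterates them (it skips "Planet" and any template-file sections). A minimal sketch, assuming it is run from the directory that holds gezegen/config.ini:

# sketch: reading the [Planet] header and the per-feed sections
from ConfigParser import ConfigParser

config = ConfigParser()
config.read("gezegen/config.ini")

name = config.get("Planet", "name")                   # "Linux Gezegeni"
per_page = config.getint("Planet", "items_per_page")  # 25

for feed_url in config.sections():
    if feed_url == "Planet":
        continue  # every remaining section is one subscribed feed
    nick = config.get(feed_url, "nick")
    label = config.get(feed_url, "label")   # Personal / LKD / Community
    # facewidth/faceheight fall back to [DEFAULT] unless a feed overrides them
    width = config.getint(feed_url, "facewidth")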
diff --git a/DJAGEN/branches/oguz/djagen/gezegen/gezegen/feeds.html.tmpl b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/feeds.html.tmpl new file mode 100755 index 0000000..acd9479 --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/feeds.html.tmpl @@ -0,0 +1,22 @@ +
+ [feeds.html.tmpl body, 22 lines of htmltmpl markup; lost in extraction]
diff --git a/DJAGEN/branches/oguz/djagen/gezegen/gezegen/feeds.html.tmplc b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/feeds.html.tmplc new file mode 100755 index 0000000..bf4e68f Binary files /dev/null and b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/feeds.html.tmplc differ diff --git a/DJAGEN/branches/oguz/djagen/gezegen/gezegen/foafroll.xml.tmpl b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/foafroll.xml.tmpl new file mode 100755 index 0000000..f344738 --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/foafroll.xml.tmpl @@ -0,0 +1,31 @@ + + + + + + " /> + + + + + + + "> + + + " /> + + + + + + + + + diff --git a/DJAGEN/branches/oguz/djagen/gezegen/gezegen/foafroll.xml.tmplc b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/foafroll.xml.tmplc new file mode 100755 index 0000000..15bbc58 Binary files /dev/null and b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/foafroll.xml.tmplc differ diff --git a/DJAGEN/branches/oguz/djagen/gezegen/gezegen/index.html.tmpl b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/index.html.tmpl new file mode 100755 index 0000000..7726f6b --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/index.html.tmpl @@ -0,0 +1,356 @@ + + + + <TMPL_VAR name> + + + + + + + + + + + + + + + + + + + + + +
+ [remaining ~350 lines of index.html.tmpl: the main HTML page layout, lost in extraction; only stray fragments survive, among them share-link anchors (...&title=..." target="_blank") and an &nbsp; spacer]
diff --git a/DJAGEN/branches/oguz/djagen/gezegen/gezegen/index.html.tmplc b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/index.html.tmplc new file mode 100755 index 0000000..b6b5d88 Binary files /dev/null and b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/index.html.tmplc differ
diff --git a/DJAGEN/branches/oguz/djagen/gezegen/gezegen/opml.xml.tmpl b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/opml.xml.tmpl new file mode 100755 index 0000000..50bbabe --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/opml.xml.tmpl @@ -0,0 +1,16 @@
+ [opml.xml.tmpl body, 16 lines; markup lost in extraction — <TMPL_VAR name> and an outline entry carrying an xmlUrl attribute survive]
diff --git a/DJAGEN/branches/oguz/djagen/gezegen/gezegen/opml.xml.tmplc b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/opml.xml.tmplc new file mode 100755 index 0000000..75a8123 Binary files /dev/null and b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/opml.xml.tmplc differ
diff --git a/DJAGEN/branches/oguz/djagen/gezegen/gezegen/rss10.xml.tmpl b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/rss10.xml.tmpl new file mode 100755 index 0000000..0cd709b --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/rss10.xml.tmpl @@ -0,0 +1,37 @@
+ [rss10.xml.tmpl body, 37 lines; markup lost in extraction — <TMPL_VAR name> and item titles of the form <TMPL_VAR channel_name><TMPL_IF title>: <TMPL_VAR title></TMPL_IF> survive]
diff --git a/DJAGEN/branches/oguz/djagen/gezegen/gezegen/rss10.xml.tmplc b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/rss10.xml.tmplc new file mode 100755 index 0000000..f5e21d4 Binary files /dev/null and b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/rss10.xml.tmplc differ
diff --git a/DJAGEN/branches/oguz/djagen/gezegen/gezegen/rss20.xml.tmpl b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/rss20.xml.tmpl new file mode 100755 index 0000000..3ff7a11 --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/rss20.xml.tmpl @@ -0,0 +1,30 @@
+ [rss20.xml.tmpl body, 30 lines; markup lost in extraction — an en language tag, the same channel_name/title item headings, and a right-aligned image inside a CDATA block survive]
diff --git a/DJAGEN/branches/oguz/djagen/gezegen/gezegen/rss20.xml.tmplc b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/rss20.xml.tmplc new file mode 100755 index 0000000..bb43467 Binary files /dev/null and b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/rss20.xml.tmplc differ
diff --git a/DJAGEN/branches/oguz/djagen/gezegen/gezegen/sidebar.html.tmpl b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/sidebar.html.tmpl new file mode 100755 index 0000000..acfdf4c --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/sidebar.html.tmpl @@ -0,0 +1,17 @@
+ [sidebar.html.tmpl body, 17 lines; lost in extraction]
diff --git a/DJAGEN/branches/oguz/djagen/gezegen/gezegen/sidebar.html.tmplc b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/sidebar.html.tmplc new file mode 100755 index 0000000..8f2420a Binary files /dev/null and b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/sidebar.html.tmplc differ
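All of the .tmpl files above are htmltmpl templates; the <TMPL_VAR>/<TMPL_IF>/<TMPL_LOOP> placeholders are the only markup that survived extraction. A minimal sketch of the render pipeline that planet/__init__.py drives at the end of this patch — the demo.tmpl file and its contents are invented here purely for illustration:

# sketch: rendering an htmltmpl template the way generate_all_files() does
import htmltmpl

open("demo.tmpl", "w").write(
    "<h1><TMPL_VAR name></h1>\n"
    "<TMPL_LOOP Items><p><TMPL_VAR title></p></TMPL_LOOP>\n")

manager = htmltmpl.TemplateManager()
template = manager.prepare("demo.tmpl")

tp = htmltmpl.TemplateProcessor(html_escape=0)
tp.set("name", "Linux Gezegeni")
tp.set("Items", [{"title": "First post"}, {"title": "Second post"}])
print tp.process(template)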

diff --git a/DJAGEN/branches/oguz/djagen/gezegen/gezegen/simple.html.tmpl b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/simple.html.tmpl new file mode 100755 index 0000000..2c20c6a --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/simple.html.tmpl @@ -0,0 +1,74 @@
+ [simple.html.tmpl body, 74 lines of stripped-down HTML/htmltmpl markup; lost in extraction apart from the <TMPL_VAR name> page title]
+ + + + + + + + + diff --git a/DJAGEN/branches/oguz/djagen/gezegen/gezegen/simple.html.tmplc b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/simple.html.tmplc new file mode 100755 index 0000000..949265e Binary files /dev/null and b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/simple.html.tmplc differ diff --git a/DJAGEN/branches/oguz/djagen/gezegen/gezegen/zaman.sh b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/zaman.sh new file mode 100755 index 0000000..e0c9a2b --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/gezegen/gezegen/zaman.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +while read x +do + echo "$(date)::$x" +done diff --git a/DJAGEN/branches/oguz/djagen/gezegen/planet-cache.py b/DJAGEN/branches/oguz/djagen/gezegen/planet-cache.py new file mode 100755 index 0000000..9334583 --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/gezegen/planet-cache.py @@ -0,0 +1,194 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 -*- +"""Planet cache tool. + +""" + +__authors__ = [ "Scott James Remnant ", + "Jeff Waugh " ] +__license__ = "Python" + + +import os +import sys +import time +import dbhash +import ConfigParser + +import planet + + +def usage(): + print "Usage: planet-cache [options] CACHEFILE [ITEMID]..." + print + print "Examine and modify information in the Planet cache." + print + print "Channel Commands:" + print " -C, --channel Display known information on the channel" + print " -L, --list List items in the channel" + print " -K, --keys List all keys found in channel items" + print + print "Item Commands (need ITEMID):" + print " -I, --item Display known information about the item(s)" + print " -H, --hide Mark the item(s) as hidden" + print " -U, --unhide Mark the item(s) as not hidden" + print + print "Other Options:" + print " -h, --help Display this help message and exit" + sys.exit(0) + +def usage_error(msg, *args): + print >>sys.stderr, msg, " ".join(args) + print >>sys.stderr, "Perhaps you need --help ?" + sys.exit(1) + +def print_keys(item, title): + keys = item.keys() + keys.sort() + key_len = max([ len(k) for k in keys ]) + + print title + ":" + for key in keys: + if item.key_type(key) == item.DATE: + value = time.strftime(planet.TIMEFMT_ISO, item[key]) + else: + value = str(item[key]) + print " %-*s %s" % (key_len, key, fit_str(value, 74 - key_len)) + +def fit_str(string, length): + if len(string) <= length: + return string + else: + return string[:length-4] + " ..." 
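# Usage notes (added commentary, not part of the original file): the cache
# files this tool opens live in the cache_directory set in config.ini
# ("cache"), one bsddb file per subscription, named after its feed URL.
# The file name below is therefore only illustrative:
#
#   planet-cache.py --channel cache/www.bugunlinux.com,feed=rss2
#   planet-cache.py --list cache/www.bugunlinux.com,feed=rss2
#   planet-cache.py --hide cache/www.bugunlinux.com,feed=rss2 SOME-ITEM-ID
#
# --hide merely sets a "hidden" key on the item and rewrites the cache;
# channel.items(hidden=1), as used in the --list branch below, is what
# surfaces hidden items again.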
+ + +if __name__ == "__main__": + cache_file = None + want_ids = 0 + ids = [] + + command = None + + for arg in sys.argv[1:]: + if arg == "-h" or arg == "--help": + usage() + elif arg == "-C" or arg == "--channel": + if command is not None: + usage_error("Only one command option may be supplied") + command = "channel" + elif arg == "-L" or arg == "--list": + if command is not None: + usage_error("Only one command option may be supplied") + command = "list" + elif arg == "-K" or arg == "--keys": + if command is not None: + usage_error("Only one command option may be supplied") + command = "keys" + elif arg == "-I" or arg == "--item": + if command is not None: + usage_error("Only one command option may be supplied") + command = "item" + want_ids = 1 + elif arg == "-H" or arg == "--hide": + if command is not None: + usage_error("Only one command option may be supplied") + command = "hide" + want_ids = 1 + elif arg == "-U" or arg == "--unhide": + if command is not None: + usage_error("Only one command option may be supplied") + command = "unhide" + want_ids = 1 + elif arg.startswith("-"): + usage_error("Unknown option:", arg) + else: + if cache_file is None: + cache_file = arg + elif want_ids: + ids.append(arg) + else: + usage_error("Unexpected extra argument:", arg) + + if cache_file is None: + usage_error("Missing expected cache filename") + elif want_ids and not len(ids): + usage_error("Missing expected entry ids") + + # Open the cache file directly to get the URL it represents + try: + db = dbhash.open(cache_file) + url = db["url"] + db.close() + except dbhash.bsddb._db.DBError, e: + print >>sys.stderr, cache_file + ":", e.args[1] + sys.exit(1) + except KeyError: + print >>sys.stderr, cache_file + ": Probably not a cache file" + sys.exit(1) + + # Now do it the right way :-) + my_planet = planet.Planet(ConfigParser.ConfigParser()) + my_planet.cache_directory = os.path.dirname(cache_file) + channel = planet.Channel(my_planet, url) + + for item_id in ids: + if not channel.has_item(item_id): + print >>sys.stderr, item_id + ": Not in channel" + sys.exit(1) + + # Do the user's bidding + if command == "channel": + print_keys(channel, "Channel Keys") + + elif command == "item": + for item_id in ids: + item = channel.get_item(item_id) + print_keys(item, "Item Keys for %s" % item_id) + + elif command == "list": + print "Items in Channel:" + for item in channel.items(hidden=1, sorted=1): + print " " + item.id + print " " + time.strftime(planet.TIMEFMT_ISO, item.date) + if hasattr(item, "title"): + print " " + fit_str(item.title, 70) + if hasattr(item, "hidden"): + print " (hidden)" + + elif command == "keys": + keys = {} + for item in channel.items(): + for key in item.keys(): + keys[key] = 1 + + keys = keys.keys() + keys.sort() + + print "Keys used in Channel:" + for key in keys: + print " " + key + print + + print "Use --item to output values of particular items." + + elif command == "hide": + for item_id in ids: + item = channel.get_item(item_id) + if hasattr(item, "hidden"): + print item_id + ": Already hidden." + else: + item.hidden = "yes" + + channel.cache_write() + print "Done." + + elif command == "unhide": + for item_id in ids: + item = channel.get_item(item_id) + if hasattr(item, "hidden"): + del(item.hidden) + else: + print item_id + ": Not hidden." + + channel.cache_write() + print "Done." 
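planet.py, which follows, is the bridge between the aggregator and Django: after each run it upserts every channel into the Authors model and every item into that author's Entries. A condensed sketch of the per-item upsert it spells out longhand, assuming the djagen.collector.models fields used below (get_or_create is stock Django and stands in for the original's try/except get()-then-create()):

# sketch: the entry upsert pattern implemented longhand in planet.py below
import datetime

def store_item(author, item):
    d = item.date  # feedparser-style time tuple
    entry, created = author.entries_set.get_or_create(
        id_hash=item.id_hash,
        defaults={'title': item.title,
                  'content_html': item.content,
                  'link': item.link,
                  'date': datetime.datetime(*d[:6])})
    if not created:  # an already-seen item: refresh the mutable fields
        entry.title = item.title
        entry.content_html = item.content
        entry.link = item.link
        entry.date = datetime.datetime(*d[:6])
    entry.content_text = entry.sanitize(item.content)  # model helper, as below
    entry.save()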
diff --git a/DJAGEN/branches/oguz/djagen/gezegen/planet.py b/DJAGEN/branches/oguz/djagen/gezegen/planet.py new file mode 100755 index 0000000..3cff8fd --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/gezegen/planet.py @@ -0,0 +1,273 @@ +#!/usr/bin/env python +"""The Planet aggregator. + +A flexible and easy-to-use aggregator for generating websites. + +Visit http://www.planetplanet.org/ for more information and to download +the latest version. + +Requires Python 2.1, recommends 2.3. +""" + +__authors__ = [ "Scott James Remnant ", + "Jeff Waugh " ] +__license__ = "Python" + +import datetime + +import os +import sys +import time +import locale +import urlparse + +import planet + +from ConfigParser import ConfigParser + +# Default configuration file path +CONFIG_FILE = "config.ini" + +# Defaults for the [Planet] config section +PLANET_NAME = "Unconfigured Planet" +PLANET_LINK = "Unconfigured Planet" +PLANET_FEED = None +OWNER_NAME = "Anonymous Coward" +OWNER_EMAIL = "" +LOG_LEVEL = "WARNING" +FEED_TIMEOUT = 20 # seconds + +# Default template file list +TEMPLATE_FILES = "examples/basic/planet.html.tmpl" + +#part for django api usage +import sys +sys.path.append('/home/oguz/django-projects/') +import os +os.environ['DJANGO_SETTINGS_MODULE'] = 'djagen.settings' +from djagen.collector.models import * + +def config_get(config, section, option, default=None, raw=0, vars=None): + """Get a value from the configuration, with a default.""" + if config.has_option(section, option): + return config.get(section, option, raw=raw, vars=None) + else: + return default + +def main(): + config_file = CONFIG_FILE + offline = 0 + verbose = 0 + + for arg in sys.argv[1:]: + if arg == "-h" or arg == "--help": + print "Usage: planet [options] [CONFIGFILE]" + print + print "Options:" + print " -v, --verbose DEBUG level logging during update" + print " -o, --offline Update the Planet from the cache only" + print " -h, --help Display this help message and exit" + print + sys.exit(0) + elif arg == "-v" or arg == "--verbose": + verbose = 1 + elif arg == "-o" or arg == "--offline": + offline = 1 + elif arg.startswith("-"): + print >>sys.stderr, "Unknown option:", arg + sys.exit(1) + else: + config_file = arg + + # Read the configuration file + config = ConfigParser() + config.read(config_file) + if not config.has_section("Planet"): + print >>sys.stderr, "Configuration missing [Planet] section." + sys.exit(1) + + # Read the [Planet] config section + planet_name = config_get(config, "Planet", "name", PLANET_NAME) + planet_link = config_get(config, "Planet", "link", PLANET_LINK) + planet_feed = config_get(config, "Planet", "feed", PLANET_FEED) + owner_name = config_get(config, "Planet", "owner_name", OWNER_NAME) + owner_email = config_get(config, "Planet", "owner_email", OWNER_EMAIL) + if verbose: + log_level = "DEBUG" + else: + log_level = config_get(config, "Planet", "log_level", LOG_LEVEL) + feed_timeout = config_get(config, "Planet", "feed_timeout", FEED_TIMEOUT) + template_files = config_get(config, "Planet", "template_files", + TEMPLATE_FILES).split(" ") + + # Default feed to the first feed for which there is a template + if not planet_feed: + for template_file in template_files: + name = os.path.splitext(os.path.basename(template_file))[0] + if name.find('atom')>=0 or name.find('rss')>=0: + planet_feed = urlparse.urljoin(planet_link, name) + break + + # Define locale + if config.has_option("Planet", "locale"): + # The user can specify more than one locale (separated by ":") as + # fallbacks. 
+ locale_ok = False + for user_locale in config.get("Planet", "locale").split(':'): + user_locale = user_locale.strip() + try: + locale.setlocale(locale.LC_ALL, user_locale) + except locale.Error: + pass + else: + locale_ok = True + break + if not locale_ok: + print >>sys.stderr, "Unsupported locale setting." + sys.exit(1) + + # Activate logging + planet.logging.basicConfig() + planet.logging.getLogger().setLevel(planet.logging.getLevelName(log_level)) + log = planet.logging.getLogger("planet.runner") + try: + log.warning + except: + log.warning = log.warn + + # timeoutsocket allows feedparser to time out rather than hang forever on + # ultra-slow servers. Python 2.3 now has this functionality available in + # the standard socket library, so under 2.3 you don't need to install + # anything. But you probably should anyway, because the socket module is + # buggy and timeoutsocket is better. + if feed_timeout: + try: + feed_timeout = float(feed_timeout) + except: + log.warning("Feed timeout set to invalid value '%s', skipping", feed_timeout) + feed_timeout = None + + if feed_timeout and not offline: + try: + from planet import timeoutsocket + timeoutsocket.setDefaultSocketTimeout(feed_timeout) + log.debug("Socket timeout set to %d seconds", feed_timeout) + except ImportError: + import socket + if hasattr(socket, 'setdefaulttimeout'): + log.debug("timeoutsocket not found, using python function") + socket.setdefaulttimeout(feed_timeout) + log.debug("Socket timeout set to %d seconds", feed_timeout) + else: + log.error("Unable to set timeout to %d seconds", feed_timeout) + + # run the planet + my_planet = planet.Planet(config) + my_planet.run(planet_name, planet_link, template_files, offline) + + #add the current channels to the db + channels = my_planet.channels() + for channel in channels: + + author_name = channel.name + + try: + author_face = channel.face + except: + author_face = None + try: + channel_subtitle = channel.subtitle + except: + channel_subtitle = None + try: + channel_title = channel.title + except: + channel_title = None + + channel_url = channel.url + + try: + channel_link = channel.link + except: + channel_link = None + + try: + channel_urlstatus = channel.url_status + except: + channel_urlstatus = None + + label = channel.label + label_personal = 0 + label_lkd = 0 + label_community = 0 + label_eng = 0 + if label == "Personal": + label_personal = 1 + if label == "LKD": + label_lkd = 1 + if label == "Community": + label_community = 1 + if label == "Eng": + label_eng = 1 + + id = channel.id + try: + author = Authors.objects.get(author_id=id) + + #update the values with the ones at the config file + author.author_name = author_name + #print author_name + author.author_face = author_face + author.channel_subtitle = channel_subtitle + author.channel_title = channel_title + author.channel_url = channel_url + author.channel_link = channel_link + author.channel_url_status = channel_urlstatus + author.label_personal = label_personal + author.label_lkd = label_lkd + author.label_community = label_community + author.label_eng = label_eng + + except Exception, ex: + #print ex + author = Authors(author_id=id, author_name=author_name, author_face=author_face, channel_subtitle=channel_subtitle, channel_title=channel_title, channel_url=channel_url, channel_link=channel_link, channel_urlstatus=channel_urlstatus, label_personal=label_personal, label_lkd=label_lkd, label_community=label_community, label_eng=label_eng) + + + author.save() + + #entry issues + items = channel.items() + for item in 
items: + id_hash = item.id_hash + + try: + entry = author.entries_set.get(id_hash = id_hash) + entry.title = item.title + entry.content_html = item.content + entry.content_text = entry.sanitize(item.content) + entry.summary = item.summary + entry.link = item.link + d = item.date + entry.date = datetime.datetime(d[0], d[1], d[2], d[3], d[4], d[5]) + except: + content_html = item.content + #content_text = entry.sanitize(content_html) + d = item.date + if not item.has_key('summary'): summary = None + else: summary = item.summary + entry = author.entries_set.create(id_hash=id_hash, title=item.title, content_html=item.content, summary=summary, link=item.link, date=datetime.datetime(d[0], d[1], d[2], d[3], d[4], d[5])) + entry.content_text = entry.sanitize(content_html) + + entry.save() + + #record the time of this run + r = RunTime() + r.save() + + my_planet.generate_all_files(template_files, planet_name, + planet_link, planet_feed, owner_name, owner_email) + + +if __name__ == "__main__": + main() +
diff --git a/DJAGEN/branches/oguz/djagen/gezegen/planet/__init__.py b/DJAGEN/branches/oguz/djagen/gezegen/planet/__init__.py new file mode 100755 index 0000000..7829731 --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/gezegen/planet/__init__.py @@ -0,0 +1,969 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 -*- +"""Planet aggregator library. + +This package is a library for developing web sites or software that +aggregate RSS, CDF and Atom feeds taken from elsewhere into a single, +combined feed. +""" + +__version__ = "2.0" +__authors__ = [ "Scott James Remnant ", + "Jeff Waugh " ] +__license__ = "Python" + +import locale + +# Modules available without separate import +import cache +import feedparser +import sanitize +import htmltmpl +import sgmllib +try: + import logging +except: + import compat_logging as logging + +# Limit the effect of "from planet import *" +__all__ = ("cache", "feedparser", "htmltmpl", "logging", + "Planet", "Channel", "NewsItem") + + +import os +import md5 +import time +import dbhash +import re + +try: + from xml.sax.saxutils import escape +except: + def escape(data): + return data.replace("&","&amp;").replace(">","&gt;").replace("<","&lt;") + +# Version information (for generator headers) +VERSION = ("Planet/%s +http://www.planetplanet.org" % __version__) + +# Default User-Agent header to send when retrieving feeds +USER_AGENT = VERSION + " " + feedparser.USER_AGENT + +# Default cache directory +CACHE_DIRECTORY = "cache" + +# Default number of items to display from a new feed +NEW_FEED_ITEMS = 10 + +# Useful common date/time formats +TIMEFMT_ISO = "%Y-%m-%dT%H:%M:%S+00:00" +TIMEFMT_822 = "%a, %d %b %Y %H:%M:%S +0000" + + +# Log instance to use here +log = logging.getLogger("planet") +try: + log.warning +except: + log.warning = log.warn + +# Defaults for the template file config sections +ENCODING = "utf-8" +ITEMS_PER_PAGE = 60 +DAYS_PER_PAGE = 0 +OUTPUT_DIR = "output" +DATE_FORMAT = "%B %d, %Y %I:%M %p" +NEW_DATE_FORMAT = "%B %d, %Y" +ACTIVITY_THRESHOLD = 0 + +class stripHtml(sgmllib.SGMLParser): + "remove all tags from the data" + def __init__(self, data): + sgmllib.SGMLParser.__init__(self) + self.result='' + self.feed(data) + self.close() + def handle_data(self, data): + if data: self.result+=data + +def template_info(item, date_format): + """Produce a dictionary of template information.""" + info = {} + + #set the locale so that the dates in the feeds are rendered in English + lc=locale.getlocale() + if lc[0] == None: + try: + locale.setlocale(locale.LC_ALL, '') + except: + pass + elif
lc[0].find("tr") != -1: + try: + locale.setlocale(locale.LC_ALL, '') + except: + pass + + for key in item.keys(): + if item.key_type(key) == item.DATE: + date = item.get_as_date(key) + info[key] = time.strftime(date_format, date) + info[key + "_iso"] = time.strftime(TIMEFMT_ISO, date) + info[key + "_822"] = time.strftime(TIMEFMT_822, date) + else: + info[key] = item[key] + if 'title' in item.keys(): + info['title_plain'] = stripHtml(info['title']).result + + return info + + +class Planet: + """A set of channels. + + This class represents a set of channels for which the items will + be aggregated together into one combined feed. + + Properties: + user_agent User-Agent header to fetch feeds with. + cache_directory Directory to store cached channels in. + new_feed_items Number of items to display from a new feed. + filter A regular expression that articles must match. + exclude A regular expression that articles must not match. + """ + def __init__(self, config): + self.config = config + + self._channels = [] + + self.user_agent = USER_AGENT + self.cache_directory = CACHE_DIRECTORY + self.new_feed_items = NEW_FEED_ITEMS + self.filter = None + self.exclude = None + + def tmpl_config_get(self, template, option, default=None, raw=0, vars=None): + """Get a template value from the configuration, with a default.""" + if self.config.has_option(template, option): + return self.config.get(template, option, raw=raw, vars=None) + elif self.config.has_option("Planet", option): + return self.config.get("Planet", option, raw=raw, vars=None) + else: + return default + + def gather_channel_info(self, template_file="Planet"): + date_format = self.tmpl_config_get(template_file, + "date_format", DATE_FORMAT, raw=1) + + activity_threshold = int(self.tmpl_config_get(template_file, + "activity_threshold", + ACTIVITY_THRESHOLD)) + + if activity_threshold: + activity_horizon = \ + time.gmtime(time.time()-86400*activity_threshold) + else: + activity_horizon = 0 + + channels = {} + channels_list = [] + for channel in self.channels(hidden=1): + channels[channel] = template_info(channel, date_format) + channels_list.append(channels[channel]) + + # identify inactive feeds + if activity_horizon: + latest = channel.items(sorted=1) + if len(latest)==0 or latest[0].date < activity_horizon: + channels[channel]["message"] = \ + "no activity in %d days" % activity_threshold + + # report channel level errors + if not channel.url_status: continue + status = int(channel.url_status) + if status == 403: + channels[channel]["message"] = "403: forbidden" + elif status == 404: + channels[channel]["message"] = "404: not found" + elif status == 408: + channels[channel]["message"] = "408: request timeout" + elif status == 410: + channels[channel]["message"] = "410: gone" + elif status == 500: + channels[channel]["message"] = "internal server error" + elif status >= 400: + channels[channel]["message"] = "http status %s" % status + + return channels, channels_list + + def gather_items_info(self, channels, template_file="Planet", channel_list=None): + items_list = [] + prev_date = [] + prev_channel = None + + date_format = self.tmpl_config_get(template_file, + "date_format", DATE_FORMAT, raw=1) + items_per_page = int(self.tmpl_config_get(template_file, + "items_per_page", ITEMS_PER_PAGE)) + days_per_page = int(self.tmpl_config_get(template_file, + "days_per_page", DAYS_PER_PAGE)) + new_date_format = self.tmpl_config_get(template_file, + "new_date_format", NEW_DATE_FORMAT, raw=1) + + for newsitem in self.items(max_items=items_per_page, + 
max_days=days_per_page, + channels=channel_list): + item_info = template_info(newsitem, date_format) + chan_info = channels[newsitem._channel] + for k, v in chan_info.items(): + item_info["channel_" + k] = v + + # Check for the start of a new day + if prev_date[:3] != newsitem.date[:3]: + prev_date = newsitem.date + item_info["new_date"] = time.strftime(new_date_format, + newsitem.date) + + # Check for the start of a new channel + if item_info.has_key("new_date") \ + or prev_channel != newsitem._channel: + prev_channel = newsitem._channel + item_info["new_channel"] = newsitem._channel.url + + items_list.append(item_info) + + return items_list + + def run(self, planet_name, planet_link, template_files, offline = False): + log = logging.getLogger("planet.runner") + + # Create a planet + log.info("Loading cached data") + if self.config.has_option("Planet", "cache_directory"): + self.cache_directory = self.config.get("Planet", "cache_directory") + if self.config.has_option("Planet", "new_feed_items"): + self.new_feed_items = int(self.config.get("Planet", "new_feed_items")) + self.user_agent = "%s +%s %s" % (planet_name, planet_link, + self.user_agent) + if self.config.has_option("Planet", "filter"): + self.filter = self.config.get("Planet", "filter") + + # The other configuration blocks are channels to subscribe to + for feed_url in self.config.sections(): + if feed_url == "Planet" or feed_url in template_files: + continue + log.info(feed_url) + # Create a channel, configure it and subscribe it + channel = Channel(self, feed_url) + self.subscribe(channel) + + # Update it + try: + if not offline and not channel.url_status == '410': + channel.update() + except KeyboardInterrupt: + raise + except: + log.exception("Update of <%s> failed", feed_url) + + def generate_all_files(self, template_files, planet_name, + planet_link, planet_feed, owner_name, owner_email): + + log = logging.getLogger("planet.runner") + # Go-go-gadget-template + for template_file in template_files: + manager = htmltmpl.TemplateManager() + log.info("Processing template %s", template_file) + try: + template = manager.prepare(template_file) + except htmltmpl.TemplateError: + template = manager.prepare(os.path.basename(template_file)) + # Read the configuration + output_dir = self.tmpl_config_get(template_file, + "output_dir", OUTPUT_DIR) + date_format = self.tmpl_config_get(template_file, + "date_format", DATE_FORMAT, raw=1) + encoding = self.tmpl_config_get(template_file, "encoding", ENCODING) + + # We treat each template individually + base = os.path.splitext(os.path.basename(template_file))[0] + url = os.path.join(planet_link, base) + output_file = os.path.join(output_dir, base) + + # Gather information + channels, channels_list = self.gather_channel_info(template_file) + items_list = self.gather_items_info(channels, template_file) + + # Gather item information + + # Process the template + tp = htmltmpl.TemplateProcessor(html_escape=0) + tp.set("Items", items_list) + tp.set("Channels", channels_list) + + # Generic information + tp.set("generator", VERSION) + tp.set("name", planet_name) + tp.set("link", planet_link) + tp.set("owner_name", owner_name) + tp.set("owner_email", owner_email) + tp.set("url", url) + + if planet_feed: + tp.set("feed", planet_feed) + tp.set("feedtype", planet_feed.find('rss')>=0 and 'rss' or 'atom') + + # Update time + date = time.localtime() + tp.set("date", time.strftime(date_format, date)) + tp.set("date_iso", time.strftime(TIMEFMT_ISO, date)) + tp.set("date_822", time.strftime(TIMEFMT_822, date)) + 
+ try: + log.info("Writing %s", output_file) + output_fd = open(output_file, "w") + if encoding.lower() in ("utf-8", "utf8"): + # UTF-8 output is the default because we use that internally + output_fd.write(tp.process(template)) + elif encoding.lower() in ("xml", "html", "sgml"): + # Magic for Python 2.3 users + output = tp.process(template).decode("utf-8") + output_fd.write(output.encode("ascii", "xmlcharrefreplace")) + else: + # Must be a "known" encoding + output = tp.process(template).decode("utf-8") + output_fd.write(output.encode(encoding, "replace")) + output_fd.close() + except KeyboardInterrupt: + raise + except: + log.exception("Write of %s failed", output_file) + + def channels(self, hidden=0, sorted=1): + """Return the list of channels.""" + channels = [] + for channel in self._channels: + if hidden or not channel.has_key("hidden"): + channels.append((channel.name, channel)) + + if sorted: + channels.sort() + + return [ c[-1] for c in channels ] + + def find_by_basename(self, basename): + for channel in self._channels: + if basename == channel.cache_basename(): return channel + + def subscribe(self, channel): + """Subscribe the planet to the channel.""" + self._channels.append(channel) + + def unsubscribe(self, channel): + """Unsubscribe the planet from the channel.""" + self._channels.remove(channel) + + def items(self, hidden=0, sorted=1, max_items=0, max_days=0, channels=None): + """Return an optionally filtered list of items in the channel. + + The filters are applied in the following order: + + If hidden is true then items in hidden channels and hidden items + will be returned. + + If sorted is true then the item list will be sorted with the newest + first. + + If max_items is non-zero then this number of items, at most, will + be returned. + + If max_days is non-zero then any items older than the newest by + this number of days won't be returned. Requires sorted=1 to work. + + + The sharp-eyed will note that this looks a little strange code-wise, + it turns out that Python gets *really* slow if we try to sort the + actual items themselves. Also we use mktime here, but it's ok + because we discard the numbers and just need them to be relatively + consistent between each other. 
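For illustration, the decorate-sort-undecorate idiom the code below relies on, shown as a minimal standalone sketch; here entries is a hypothetical list of objects carrying the 9-tuple date and string order fields described above:

    import time
    decorated = [(time.mktime(e.date), e.order, e) for e in entries]
    decorated.sort()
    decorated.reverse()                        # newest first
    newest_first = [d[-1] for d in decorated]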
+ """ + planet_filter_re = None + if self.filter: + planet_filter_re = re.compile(self.filter, re.I) + planet_exclude_re = None + if self.exclude: + planet_exclude_re = re.compile(self.exclude, re.I) + + items = [] + seen_guids = {} + if not channels: channels=self.channels(hidden=hidden, sorted=0) + for channel in channels: + for item in channel._items.values(): + if hidden or not item.has_key("hidden"): + + channel_filter_re = None + if channel.filter: + channel_filter_re = re.compile(channel.filter, + re.I) + channel_exclude_re = None + if channel.exclude: + channel_exclude_re = re.compile(channel.exclude, + re.I) + if (planet_filter_re or planet_exclude_re \ + or channel_filter_re or channel_exclude_re): + title = "" + if item.has_key("title"): + title = item.title + content = item.get_content("content") + + if planet_filter_re: + if not (planet_filter_re.search(title) \ + or planet_filter_re.search(content)): + continue + + if planet_exclude_re: + if (planet_exclude_re.search(title) \ + or planet_exclude_re.search(content)): + continue + + if channel_filter_re: + if not (channel_filter_re.search(title) \ + or channel_filter_re.search(content)): + continue + + if channel_exclude_re: + if (channel_exclude_re.search(title) \ + or channel_exclude_re.search(content)): + continue + + if not seen_guids.has_key(item.id): + seen_guids[item.id] = 1; + items.append((time.mktime(item.date), item.order, item)) + + # Sort the list + if sorted: + items.sort() + items.reverse() + + # Apply max_items filter + if len(items) and max_items: + items = items[:max_items] + + # Apply max_days filter + if len(items) and max_days: + max_count = 0 + max_time = items[0][0] - max_days * 84600 + for item in items: + if item[0] > max_time: + max_count += 1 + else: + items = items[:max_count] + break + + return [ i[-1] for i in items ] + +class Channel(cache.CachedInfo): + """A list of news items. + + This class represents a list of news items taken from the feed of + a website or other source. + + Properties: + url URL of the feed. + url_etag E-Tag of the feed URL. + url_modified Last modified time of the feed URL. + url_status Last HTTP status of the feed URL. + hidden Channel should be hidden (True if exists). + name Name of the feed owner, or feed title. + next_order Next order number to be assigned to NewsItem + + updated Correct UTC-Normalised update time of the feed. + last_updated Correct UTC-Normalised time the feed was last updated. + + id An identifier the feed claims is unique (*). + title One-line title (*). + link Link to the original format feed (*). + tagline Short description of the feed (*). + info Longer description of the feed (*). + + modified Date the feed claims to have been modified (*). + + author Name of the author (*). + publisher Name of the publisher (*). + generator Name of the feed generator (*). + category Category name (*). + copyright Copyright information for humans to read (*). + license Link to the licence for the content (*). + docs Link to the specification of the feed format (*). + language Primary language (*). + errorreportsto E-Mail address to send error reports to (*). + + image_url URL of an associated image (*). + image_link Link to go with the associated image (*). + image_title Alternative text of the associated image (*). + image_width Width of the associated image (*). + image_height Height of the associated image (*). + + filter A regular expression that articles must match. + exclude A regular expression that articles must not match. 
+ + Properties marked (*) will only be present if the original feed + contained them. Note that the optional 'modified' date field is simply + a claim made by the item and parsed from the information given, 'updated' + (and 'last_updated') are far more reliable sources of information. + + Some feeds may define additional properties to those above. + """ + IGNORE_KEYS = ("links", "contributors", "textinput", "cloud", "categories", + "url", "href", "url_etag", "url_modified", "tags", "itunes_explicit") + + def __init__(self, planet, url): + if not os.path.isdir(planet.cache_directory): + os.makedirs(planet.cache_directory) + cache_filename = cache.filename(planet.cache_directory, url) + cache_file = dbhash.open(cache_filename, "c", 0666) + + cache.CachedInfo.__init__(self, cache_file, url, root=1) + + self._items = {} + self._planet = planet + self._expired = [] + self.url = url + # retain the original URL for error reporting + self.configured_url = url + self.url_etag = None + self.url_status = None + self.url_modified = None + self.name = None + self.updated = None + self.last_updated = None + self.filter = None + self.exclude = None + self.next_order = "0" + self.cache_read() + self.cache_read_entries() + + if planet.config.has_section(url): + for option in planet.config.options(url): + value = planet.config.get(url, option) + self.set_as_string(option, value, cached=0) + + def has_item(self, id_): + """Check whether the item exists in the channel.""" + return self._items.has_key(id_) + + def get_item(self, id_): + """Return the item from the channel.""" + return self._items[id_] + + # Special methods + __contains__ = has_item + + def items(self, hidden=0, sorted=0): + """Return the item list.""" + items = [] + for item in self._items.values(): + if hidden or not item.has_key("hidden"): + items.append((time.mktime(item.date), item.order, item)) + + if sorted: + items.sort() + items.reverse() + + return [ i[-1] for i in items ] + + def __iter__(self): + """Iterate the sorted item list.""" + return iter(self.items(sorted=1)) + + def cache_read_entries(self): + """Read entry information from the cache.""" + keys = self._cache.keys() + for key in keys: + if key.find(" ") != -1: continue + if self.has_key(key): continue + + item = NewsItem(self, key) + self._items[key] = item + + def cache_basename(self): + return cache.filename('',self._id) + + def cache_write(self, sync=1): + + """Write channel and item information to the cache.""" + for item in self._items.values(): + item.cache_write(sync=0) + for item in self._expired: + item.cache_clear(sync=0) + cache.CachedInfo.cache_write(self, sync) + + self._expired = [] + + def feed_information(self): + """ + Returns a description string for the feed embedded in this channel. + + This will usually simply be the feed url embedded in <>, but in the + case where the current self.url has changed from the original + self.configured_url the string will contain both pieces of information. + This is so that the URL in question is easier to find in logging + output: getting an error about a URL that doesn't appear in your config + file is annoying. + """ + if self.url == self.configured_url: + return "<%s>" % self.url + else: + return "<%s> (formerly <%s>)" % (self.url, self.configured_url) + + def update(self): + """Download the feed to refresh the information. + + This does the actual work of pulling down the feed and if it changes + updates the cached information about the feed and entries within it. 
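A rough sketch of the conditional-GET pattern this method wraps, using the bundled feedparser; url, etag and modified here are hypothetical stand-ins for the cached url, url_etag and url_modified values:

    import feedparser
    info = feedparser.parse(url, etag=etag, modified=modified, agent=USER_AGENT)
    if str(getattr(info, 'status', '')) == '304':
        pass  # feed unchanged since the last fetch; keep the cached entries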
+ """ + info = feedparser.parse(self.url, + etag=self.url_etag, modified=self.url_modified, + agent=self._planet.user_agent) + if info.has_key("status"): + self.url_status = str(info.status) + elif info.has_key("entries") and len(info.entries)>0: + self.url_status = str(200) + elif info.bozo and info.bozo_exception.__class__.__name__=='Timeout': + self.url_status = str(408) + else: + self.url_status = str(500) + + if self.url_status == '301' and \ + (info.has_key("entries") and len(info.entries)>0): + log.warning("Feed has moved from <%s> to <%s>", self.url, info.url) + try: + os.link(cache.filename(self._planet.cache_directory, self.url), + cache.filename(self._planet.cache_directory, info.url)) + except: + pass + self.url = info.url + elif self.url_status == '304': + log.info("Feed %s unchanged", self.feed_information()) + return + elif self.url_status == '410': + log.info("Feed %s gone", self.feed_information()) + self.cache_write() + return + elif self.url_status == '408': + log.warning("Feed %s timed out", self.feed_information()) + return + elif int(self.url_status) >= 400: + log.error("Error %s while updating feed %s", + self.url_status, self.feed_information()) + return + else: + log.info("Updating feed %s", self.feed_information()) + + self.url_etag = info.has_key("etag") and info.etag or None + self.url_modified = info.has_key("modified") and info.modified or None + if self.url_etag is not None: + log.debug("E-Tag: %s", self.url_etag) + if self.url_modified is not None: + log.debug("Last Modified: %s", + time.strftime(TIMEFMT_ISO, self.url_modified)) + + self.update_info(info.feed) + self.update_entries(info.entries) + self.cache_write() + + def update_info(self, feed): + """Update information from the feed. + + This reads the feed information supplied by feedparser and updates + the cached information about the feed. These are the various + potentially interesting properties that you might care about. 
+ """ + for key in feed.keys(): + if key in self.IGNORE_KEYS or key + "_parsed" in self.IGNORE_KEYS: + # Ignored fields + pass + elif feed.has_key(key + "_parsed"): + # Ignore unparsed date fields + pass + elif key.endswith("_detail"): + # retain name and email sub-fields + if feed[key].has_key('name') and feed[key].name: + self.set_as_string(key.replace("_detail","_name"), \ + feed[key].name) + if feed[key].has_key('email') and feed[key].email: + self.set_as_string(key.replace("_detail","_email"), \ + feed[key].email) + elif key == "items": + # Ignore items field + pass + elif key.endswith("_parsed"): + # Date fields + if feed[key] is not None: + self.set_as_date(key[:-len("_parsed")], feed[key]) + elif key == "image": + # Image field: save all the information + if feed[key].has_key("url"): + self.set_as_string(key + "_url", feed[key].url) + if feed[key].has_key("link"): + self.set_as_string(key + "_link", feed[key].link) + if feed[key].has_key("title"): + self.set_as_string(key + "_title", feed[key].title) + if feed[key].has_key("width"): + self.set_as_string(key + "_width", str(feed[key].width)) + if feed[key].has_key("height"): + self.set_as_string(key + "_height", str(feed[key].height)) + elif isinstance(feed[key], (str, unicode)): + # String fields + try: + detail = key + '_detail' + if feed.has_key(detail) and feed[detail].has_key('type'): + if feed[detail].type == 'text/html': + feed[key] = sanitize.HTML(feed[key]) + elif feed[detail].type == 'text/plain': + feed[key] = escape(feed[key]) + self.set_as_string(key, feed[key]) + except KeyboardInterrupt: + raise + except: + log.exception("Ignored '%s' of <%s>, unknown format", + key, self.url) + + def update_entries(self, entries): + """Update entries from the feed. + + This reads the entries supplied by feedparser and updates the + cached information about them. It's at this point we update + the 'updated' timestamp and keep the old one in 'last_updated', + these provide boundaries for acceptable entry times. + + If this is the first time a feed has been updated then most of the + items will be marked as hidden, according to Planet.new_feed_items. + + If the feed does not contain items which, according to the sort order, + should be there; those items are assumed to have been expired from + the feed or replaced and are removed from the cache. 
+ """ + if not len(entries): + return + + self.last_updated = self.updated + self.updated = time.gmtime() + + new_items = [] + feed_items = [] + for entry in entries: + # Try really hard to find some kind of unique identifier + if entry.has_key("id"): + entry_id = cache.utf8(entry.id) + elif entry.has_key("link"): + entry_id = cache.utf8(entry.link) + elif entry.has_key("title"): + entry_id = (self.url + "/" + + md5.new(cache.utf8(entry.title)).hexdigest()) + elif entry.has_key("summary"): + entry_id = (self.url + "/" + + md5.new(cache.utf8(entry.summary)).hexdigest()) + else: + log.error("Unable to find or generate id, entry ignored") + continue + + # Create the item if necessary and update + if self.has_item(entry_id): + item = self._items[entry_id] + else: + item = NewsItem(self, entry_id) + self._items[entry_id] = item + new_items.append(item) + item.update(entry) + feed_items.append(entry_id) + + # Hide excess items the first time through + if self.last_updated is None and self._planet.new_feed_items \ + and len(feed_items) > self._planet.new_feed_items: + item.hidden = "yes" + log.debug("Marked <%s> as hidden (new feed)", entry_id) + + # Assign order numbers in reverse + new_items.reverse() + for item in new_items: + item.order = self.next_order = str(int(self.next_order) + 1) + + # Check for expired or replaced items + feed_count = len(feed_items) + log.debug("Items in Feed: %d", feed_count) + for item in self.items(sorted=1): + if feed_count < 1: + break + elif item.id in feed_items: + feed_count -= 1 + elif item._channel.url_status != '226': + del(self._items[item.id]) + self._expired.append(item) + log.debug("Removed expired or replaced item <%s>", item.id) + + def get_name(self, key): + """Return the key containing the name.""" + for key in ("name", "title"): + if self.has_key(key) and self.key_type(key) != self.NULL: + return self.get_as_string(key) + + return "" + +class NewsItem(cache.CachedInfo): + """An item of news. + + This class represents a single item of news on a channel. They're + created by members of the Channel class and accessible through it. + + Properties: + id Channel-unique identifier for this item. + id_hash Relatively short, printable cryptographic hash of id + date Corrected UTC-Normalised update time, for sorting. + order Order in which items on the same date can be sorted. + hidden Item should be hidden (True if exists). + + title One-line title (*). + link Link to the original format text (*). + summary Short first-page summary (*). + content Full HTML content. + + modified Date the item claims to have been modified (*). + issued Date the item claims to have been issued (*). + created Date the item claims to have been created (*). + expired Date the item claims to expire (*). + + author Name of the author (*). + publisher Name of the publisher (*). + category Category name (*). + comments Link to a page to enter comments (*). + license Link to the licence for the content (*). + source_name Name of the original source of this item (*). + source_link Link to the original source of this item (*). + + Properties marked (*) will only be present if the original feed + contained them. Note that the various optional date fields are + simply claims made by the item and parsed from the information + given, 'date' is a far more reliable source of information. + + Some feeds may define additional properties to those above. 
+ """ + IGNORE_KEYS = ("categories", "contributors", "enclosures", "links", + "guidislink", "date", "tags") + + def __init__(self, channel, id_): + cache.CachedInfo.__init__(self, channel._cache, id_) + + self._channel = channel + self.id = id_ + self.id_hash = md5.new(id_).hexdigest() + self.date = None + self.order = None + self.content = None + self.cache_read() + + def update(self, entry): + """Update the item from the feedparser entry given.""" + for key in entry.keys(): + if key in self.IGNORE_KEYS or key + "_parsed" in self.IGNORE_KEYS: + # Ignored fields + pass + elif entry.has_key(key + "_parsed"): + # Ignore unparsed date fields + pass + elif key.endswith("_detail"): + # retain name, email, and language sub-fields + if entry[key].has_key('name') and entry[key].name: + self.set_as_string(key.replace("_detail","_name"), \ + entry[key].name) + if entry[key].has_key('email') and entry[key].email: + self.set_as_string(key.replace("_detail","_email"), \ + entry[key].email) + if entry[key].has_key('language') and entry[key].language and \ + (not self._channel.has_key('language') or \ + entry[key].language != self._channel.language): + self.set_as_string(key.replace("_detail","_language"), \ + entry[key].language) + elif key.endswith("_parsed"): + # Date fields + if entry[key] is not None: + self.set_as_date(key[:-len("_parsed")], entry[key]) + elif key == "source": + # Source field: save both url and value + if entry[key].has_key("value"): + self.set_as_string(key + "_name", entry[key].value) + if entry[key].has_key("url"): + self.set_as_string(key + "_link", entry[key].url) + elif key == "content": + # Content field: concatenate the values + value = "" + for item in entry[key]: + if item.type == 'text/html': + item.value = sanitize.HTML(item.value) + elif item.type == 'text/plain': + item.value = escape(item.value) + if item.has_key('language') and item.language and \ + (not self._channel.has_key('language') or + item.language != self._channel.language) : + self.set_as_string(key + "_language", item.language) + value += cache.utf8(item.value) + self.set_as_string(key, value) + elif isinstance(entry[key], (str, unicode)): + # String fields + try: + detail = key + '_detail' + if entry.has_key(detail): + if entry[detail].has_key('type'): + if entry[detail].type == 'text/html': + entry[key] = sanitize.HTML(entry[key]) + elif entry[detail].type == 'text/plain': + entry[key] = escape(entry[key]) + self.set_as_string(key, entry[key]) + except KeyboardInterrupt: + raise + except: + log.exception("Ignored '%s' of <%s>, unknown format", + key, self.id) + + # Generate the date field if we need to + self.get_date("date") + + def get_date(self, key): + """Get (or update) the date key. + + We check whether the date the entry claims to have been changed is + since we last updated this feed and when we pulled the feed off the + site. + + If it is then it's probably not bogus, and we'll sort accordingly. + + If it isn't then we bound it appropriately, this ensures that + entries appear in posting sequence but don't overlap entries + added in previous updates and don't creep into the next one. 
+ """ + + for other_key in ("updated", "modified", "published", "issued", "created"): + if self.has_key(other_key): + date = self.get_as_date(other_key) + break + else: + date = None + + if date is not None: + if date > self._channel.updated: + date = self._channel.updated +# elif date < self._channel.last_updated: +# date = self._channel.updated + elif self.has_key(key) and self.key_type(key) != self.NULL: + return self.get_as_date(key) + else: + date = self._channel.updated + + self.set_as_date(key, date) + return date + + def get_content(self, key): + """Return the key containing the content.""" + for key in ("content", "tagline", "summary"): + if self.has_key(key) and self.key_type(key) != self.NULL: + return self.get_as_string(key) + + return "" diff --git a/DJAGEN/branches/oguz/djagen/gezegen/planet/atomstyler.py b/DJAGEN/branches/oguz/djagen/gezegen/planet/atomstyler.py new file mode 100755 index 0000000..9220702 --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/gezegen/planet/atomstyler.py @@ -0,0 +1,124 @@ +from xml.dom import minidom, Node +from urlparse import urlparse, urlunparse +from xml.parsers.expat import ExpatError +from htmlentitydefs import name2codepoint +import re + +# select and apply an xml:base for this entry +class relativize: + def __init__(self, parent): + self.score = {} + self.links = [] + self.collect_and_tally(parent) + self.base = self.select_optimal_base() + if self.base: + if not parent.hasAttribute('xml:base'): + self.rebase(parent) + parent.setAttribute('xml:base', self.base) + + # collect and tally cite, href and src attributes + def collect_and_tally(self,parent): + uri = None + if parent.hasAttribute('cite'): uri=parent.getAttribute('cite') + if parent.hasAttribute('href'): uri=parent.getAttribute('href') + if parent.hasAttribute('src'): uri=parent.getAttribute('src') + + if uri: + parts=urlparse(uri) + if parts[0].lower() == 'http': + parts = (parts[1]+parts[2]).split('/') + base = None + for i in range(1,len(parts)): + base = tuple(parts[0:i]) + self.score[base] = self.score.get(base,0) + len(base) + if base and base not in self.links: self.links.append(base) + + for node in parent.childNodes: + if node.nodeType == Node.ELEMENT_NODE: + self.collect_and_tally(node) + + # select the xml:base with the highest score + def select_optimal_base(self): + if not self.score: return None + for link in self.links: + self.score[link] = 0 + winner = max(self.score.values()) + if not winner: return None + for key in self.score.keys(): + if self.score[key] == winner: + if winner == len(key): return None + return urlunparse(('http', key[0], '/'.join(key[1:]), '', '', '')) + '/' + + # rewrite cite, href and src attributes using this base + def rebase(self,parent): + uri = None + if parent.hasAttribute('cite'): uri=parent.getAttribute('cite') + if parent.hasAttribute('href'): uri=parent.getAttribute('href') + if parent.hasAttribute('src'): uri=parent.getAttribute('src') + if uri and uri.startswith(self.base): + uri = uri[len(self.base):] or '.' 
+ if parent.hasAttribute('href'): uri=parent.setAttribute('href', uri) + if parent.hasAttribute('src'): uri=parent.setAttribute('src', uri) + + for node in parent.childNodes: + if node.nodeType == Node.ELEMENT_NODE: + self.rebase(node) + +# convert type="html" to type="plain" or type="xhtml" as appropriate +def retype(parent): + for node in parent.childNodes: + if node.nodeType == Node.ELEMENT_NODE: + + if node.hasAttribute('type') and node.getAttribute('type') == 'html': + if len(node.childNodes)==0: + node.removeAttribute('type') + elif len(node.childNodes)==1: + + # replace html entity defs with utf-8 + chunks=re.split('&(\w+);', node.childNodes[0].nodeValue) + for i in range(1,len(chunks),2): + if chunks[i] in ['amp', 'lt', 'gt', 'apos', 'quot']: + chunks[i] ='&' + chunks[i] +';' + elif chunks[i] in name2codepoint: + chunks[i]=unichr(name2codepoint[chunks[i]]) + else: + chunks[i]='&' + chunks[i] + ';' + text = u"".join(chunks) + + try: + # see if the resulting text is a well-formed XML fragment + div = '
<div xmlns="http://www.w3.org/1999/xhtml">%s</div>
' + data = minidom.parseString((div % text.encode('utf-8'))) + + if text.find('<') < 0: + # plain text + node.removeAttribute('type') + text = data.documentElement.childNodes[0].nodeValue + node.childNodes[0].replaceWholeText(text) + + elif len(text) > 80: + # xhtml + node.setAttribute('type', 'xhtml') + node.removeChild(node.childNodes[0]) + node.appendChild(data.documentElement) + + except ExpatError: + # leave as html + pass + + else: + # recurse + retype(node) + + if parent.nodeName == 'entry': + relativize(parent) + +if __name__ == '__main__': + + # run styler on each file mentioned on the command line + import sys + for feed in sys.argv[1:]: + doc = minidom.parse(feed) + doc.normalize() + retype(doc.documentElement) + open(feed,'w').write(doc.toxml('utf-8')) diff --git a/DJAGEN/branches/oguz/djagen/gezegen/planet/cache.py b/DJAGEN/branches/oguz/djagen/gezegen/planet/cache.py new file mode 100755 index 0000000..dfc529b --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/gezegen/planet/cache.py @@ -0,0 +1,306 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 -*- +"""Item cache. + +Between runs of Planet we need somewhere to store the feed information +we parsed; this is so we don't lose information when a particular feed +goes away or is too short to hold enough items. + +This module provides the code to handle this cache transparently enough +that the rest of the code can take the persistence for granted. +""" + +import os +import re + + +# Regular expressions to sanitise cache filenames +re_url_scheme = re.compile(r'^[^:]*://') +re_slash = re.compile(r'[?/]+') +re_initial_cruft = re.compile(r'^[,.]*') +re_final_cruft = re.compile(r'[,.]*$') + + +class CachedInfo: + """Cached information. + + This class is designed to hold information that is stored in a cache + between instances. It can act both as a dictionary (c['foo']) and + as an object (c.foo) to get and set values and supports both string + and date values. + + If you wish to support special fields you can derive a class off this + and implement get_FIELD and set_FIELD functions which will be + automatically called.
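Both access styles resolve to the same stored value; a usage sketch (the dbhash file name and feed key here are hypothetical):

    import dbhash
    db = dbhash.open('example.cache', 'c')
    info = CachedInfo(db, 'http://example.org/feed')
    info.title = 'Example feed'   # attribute style goes through set()
    print info['title']           # dictionary style goes through get()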
+ """ + STRING = "string" + DATE = "date" + NULL = "null" + + def __init__(self, cache, id_, root=0): + self._type = {} + self._value = {} + self._cached = {} + + self._cache = cache + self._id = id_.replace(" ", "%20") + self._root = root + + def cache_key(self, key): + """Return the cache key name for the given key.""" + key = key.replace(" ", "_") + if self._root: + return key + else: + return self._id + " " + key + + def cache_read(self): + """Read information from the cache.""" + if self._root: + keys_key = " keys" + else: + keys_key = self._id + + if self._cache.has_key(keys_key): + keys = self._cache[keys_key].split(" ") + else: + return + + for key in keys: + cache_key = self.cache_key(key) + if not self._cached.has_key(key) or self._cached[key]: + # Key either hasn't been loaded, or is one for the cache + self._value[key] = self._cache[cache_key] + self._type[key] = self._cache[cache_key + " type"] + self._cached[key] = 1 + + def cache_write(self, sync=1): + """Write information to the cache.""" + self.cache_clear(sync=0) + + keys = [] + for key in self.keys(): + cache_key = self.cache_key(key) + if not self._cached[key]: + if self._cache.has_key(cache_key): + # Non-cached keys need to be cleared + del(self._cache[cache_key]) + del(self._cache[cache_key + " type"]) + continue + + keys.append(key) + self._cache[cache_key] = self._value[key] + self._cache[cache_key + " type"] = self._type[key] + + if self._root: + keys_key = " keys" + else: + keys_key = self._id + + self._cache[keys_key] = " ".join(keys) + if sync: + self._cache.sync() + + def cache_clear(self, sync=1): + """Remove information from the cache.""" + if self._root: + keys_key = " keys" + else: + keys_key = self._id + + if self._cache.has_key(keys_key): + keys = self._cache[keys_key].split(" ") + del(self._cache[keys_key]) + else: + return + + for key in keys: + cache_key = self.cache_key(key) + del(self._cache[cache_key]) + del(self._cache[cache_key + " type"]) + + if sync: + self._cache.sync() + + def has_key(self, key): + """Check whether the key exists.""" + key = key.replace(" ", "_") + return self._value.has_key(key) + + def key_type(self, key): + """Return the key type.""" + key = key.replace(" ", "_") + return self._type[key] + + def set(self, key, value, cached=1): + """Set the value of the given key. + + If a set_KEY function exists that is called otherwise the + string function is called and the date function if that fails + (it nearly always will). + """ + key = key.replace(" ", "_") + + try: + func = getattr(self, "set_" + key) + except AttributeError: + pass + else: + return func(key, value) + + if value == None: + return self.set_as_null(key, value) + else: + try: + return self.set_as_string(key, value) + except TypeError: + return self.set_as_date(key, value) + + def get(self, key): + """Return the value of the given key. + + If a get_KEY function exists that is called otherwise the + correctly typed function is called if that exists. + """ + key = key.replace(" ", "_") + + try: + func = getattr(self, "get_" + key) + except AttributeError: + pass + else: + return func(key) + + try: + func = getattr(self, "get_as_" + self._type[key]) + except AttributeError: + pass + else: + return func(key) + + return self._value[key] + + def set_as_string(self, key, value, cached=1): + """Set the key to the string value. + + The value is converted to UTF-8 if it is a Unicode string, otherwise + it's assumed to have failed decoding (feedparser tries pretty hard) + so has all non-ASCII characters stripped. 
+ """ + value = utf8(value) + + key = key.replace(" ", "_") + self._value[key] = value + self._type[key] = self.STRING + self._cached[key] = cached + + def get_as_string(self, key): + """Return the key as a string value.""" + key = key.replace(" ", "_") + if not self.has_key(key): + raise KeyError, key + + return self._value[key] + + def set_as_date(self, key, value, cached=1): + """Set the key to the date value. + + The date should be a 9-item tuple as returned by time.gmtime(). + """ + value = " ".join([ str(s) for s in value ]) + + key = key.replace(" ", "_") + self._value[key] = value + self._type[key] = self.DATE + self._cached[key] = cached + + def get_as_date(self, key): + """Return the key as a date value.""" + key = key.replace(" ", "_") + if not self.has_key(key): + raise KeyError, key + + value = self._value[key] + return tuple([ int(i) for i in value.split(" ") ]) + + def set_as_null(self, key, value, cached=1): + """Set the key to the null value. + + This only exists to make things less magic. + """ + key = key.replace(" ", "_") + self._value[key] = "" + self._type[key] = self.NULL + self._cached[key] = cached + + def get_as_null(self, key): + """Return the key as the null value.""" + key = key.replace(" ", "_") + if not self.has_key(key): + raise KeyError, key + + return None + + def del_key(self, key): + """Delete the given key.""" + key = key.replace(" ", "_") + if not self.has_key(key): + raise KeyError, key + + del(self._value[key]) + del(self._type[key]) + del(self._cached[key]) + + def keys(self): + """Return the list of cached keys.""" + return self._value.keys() + + def __iter__(self): + """Iterate the cached keys.""" + return iter(self._value.keys()) + + # Special methods + __contains__ = has_key + __setitem__ = set_as_string + __getitem__ = get + __delitem__ = del_key + __delattr__ = del_key + + def __setattr__(self, key, value): + if key.startswith("_"): + self.__dict__[key] = value + else: + self.set(key, value) + + def __getattr__(self, key): + if self.has_key(key): + return self.get(key) + else: + raise AttributeError, key + + +def filename(directory, filename): + """Return a filename suitable for the cache. + + Strips dangerous and common characters to create a filename we + can use to store the cache in. + """ + filename = re_url_scheme.sub("", filename) + filename = re_slash.sub(",", filename) + filename = re_initial_cruft.sub("", filename) + filename = re_final_cruft.sub("", filename) + + return os.path.join(directory, filename) + +def utf8(value): + """Return the value as a UTF-8 string.""" + if type(value) == type(u''): + return value.encode("utf-8") + else: + try: + return unicode(value, "utf-8").encode("utf-8") + except UnicodeError: + try: + return unicode(value, "iso-8859-1").encode("utf-8") + except UnicodeError: + return unicode(value, "ascii", "replace").encode("utf-8") diff --git a/DJAGEN/branches/oguz/djagen/gezegen/planet/compat_logging/__init__.py b/DJAGEN/branches/oguz/djagen/gezegen/planet/compat_logging/__init__.py new file mode 100755 index 0000000..3bd0c6d --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/gezegen/planet/compat_logging/__init__.py @@ -0,0 +1,1196 @@ +# Copyright 2001-2002 by Vinay Sajip. All Rights Reserved. 
+# +# Permission to use, copy, modify, and distribute this software and its +# documentation for any purpose and without fee is hereby granted, +# provided that the above copyright notice appear in all copies and that +# both that copyright notice and this permission notice appear in +# supporting documentation, and that the name of Vinay Sajip +# not be used in advertising or publicity pertaining to distribution +# of the software without specific, written prior permission. +# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL +# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER +# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +""" +Logging package for Python. Based on PEP 282 and comments thereto in +comp.lang.python, and influenced by Apache's log4j system. + +Should work under Python versions >= 1.5.2, except that source line +information is not available unless 'sys._getframe()' is. + +Copyright (C) 2001-2002 Vinay Sajip. All Rights Reserved. + +To use, simply 'import logging' and log away! +""" + +import sys, os, types, time, string, cStringIO + +try: + import thread + import threading +except ImportError: + thread = None + +__author__ = "Vinay Sajip " +__status__ = "beta" +__version__ = "0.4.8.1" +__date__ = "26 June 2003" + +#--------------------------------------------------------------------------- +# Miscellaneous module data +#--------------------------------------------------------------------------- + +# +#_srcfile is used when walking the stack to check when we've got the first +# caller stack frame. +# +if string.lower(__file__[-4:]) in ['.pyc', '.pyo']: + _srcfile = __file__[:-4] + '.py' +else: + _srcfile = __file__ +_srcfile = os.path.normcase(_srcfile) + +# _srcfile is only used in conjunction with sys._getframe(). +# To provide compatibility with older versions of Python, set _srcfile +# to None if _getframe() is not available; this value will prevent +# findCaller() from being called. +if not hasattr(sys, "_getframe"): + _srcfile = None + +# +#_startTime is used as the base when calculating the relative time of events +# +_startTime = time.time() + +# +#raiseExceptions is used to see if exceptions during handling should be +#propagated +# +raiseExceptions = 1 + +#--------------------------------------------------------------------------- +# Level related stuff +#--------------------------------------------------------------------------- +# +# Default levels and level names, these can be replaced with any positive set +# of values having corresponding names. There is a pseudo-level, NOTSET, which +# is only really there as a lower limit for user-defined levels. Handlers and +# loggers are initialized with NOTSET so that they will log all messages, even +# at user-defined levels. +# +CRITICAL = 50 +FATAL = CRITICAL +ERROR = 40 +WARNING = 30 +WARN = WARNING +INFO = 20 +DEBUG = 10 +NOTSET = 0 + +_levelNames = { + CRITICAL : 'CRITICAL', + ERROR : 'ERROR', + WARNING : 'WARNING', + INFO : 'INFO', + DEBUG : 'DEBUG', + NOTSET : 'NOTSET', + 'CRITICAL' : CRITICAL, + 'ERROR' : ERROR, + 'WARN' : WARNING, + 'WARNING' : WARNING, + 'INFO' : INFO, + 'DEBUG' : DEBUG, + 'NOTSET' : NOTSET, +} + +def getLevelName(level): + """ + Return the textual representation of logging level 'level'. 
+ + If the level is one of the predefined levels (CRITICAL, ERROR, WARNING, + INFO, DEBUG) then you get the corresponding string. If you have + associated levels with names using addLevelName then the name you have + associated with 'level' is returned. Otherwise, the string + "Level %s" % level is returned. + """ + return _levelNames.get(level, ("Level %s" % level)) + +def addLevelName(level, levelName): + """ + Associate 'levelName' with 'level'. + + This is used when converting levels to text during message formatting. + """ + _acquireLock() + try: #unlikely to cause an exception, but you never know... + _levelNames[level] = levelName + _levelNames[levelName] = level + finally: + _releaseLock() + +#--------------------------------------------------------------------------- +# Thread-related stuff +#--------------------------------------------------------------------------- + +# +#_lock is used to serialize access to shared data structures in this module. +#This needs to be an RLock because fileConfig() creates Handlers and so +#might arbitrary user threads. Since Handler.__init__() updates the shared +#dictionary _handlers, it needs to acquire the lock. But if configuring, +#the lock would already have been acquired - so we need an RLock. +#The same argument applies to Loggers and Manager.loggerDict. +# +_lock = None + +def _acquireLock(): + """ + Acquire the module-level lock for serializing access to shared data. + + This should be released with _releaseLock(). + """ + global _lock + if (not _lock) and thread: + _lock = threading.RLock() + if _lock: + _lock.acquire() + +def _releaseLock(): + """ + Release the module-level lock acquired by calling _acquireLock(). + """ + if _lock: + _lock.release() + +#--------------------------------------------------------------------------- +# The logging record +#--------------------------------------------------------------------------- + +class LogRecord: + """ + A LogRecord instance represents an event being logged. + + LogRecord instances are created every time something is logged. They + contain all the information pertinent to the event being logged. The + main information passed in is in msg and args, which are combined + using str(msg) % args to create the message field of the record. The + record also includes information such as when the record was created, + the source line where the logging call was made, and any exception + information to be logged. + """ + def __init__(self, name, level, pathname, lineno, msg, args, exc_info): + """ + Initialize a logging record with interesting information. + """ + ct = time.time() + self.name = name + self.msg = msg + self.args = args + self.levelname = getLevelName(level) + self.levelno = level + self.pathname = pathname + try: + self.filename = os.path.basename(pathname) + self.module = os.path.splitext(self.filename)[0] + except: + self.filename = pathname + self.module = "Unknown module" + self.exc_info = exc_info + self.lineno = lineno + self.created = ct + self.msecs = (ct - long(ct)) * 1000 + self.relativeCreated = (self.created - _startTime) * 1000 + if thread: + self.thread = thread.get_ident() + else: + self.thread = None + if hasattr(os, 'getpid'): + self.process = os.getpid() + else: + self.process = None + + def __str__(self): + return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno, + self.pathname, self.lineno, self.msg) + + def getMessage(self): + """ + Return the message for this LogRecord. + + Return the message for this LogRecord after merging any user-supplied + arguments with the message.
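That merge is essentially str(msg) % args; a quick sketch of the round trip with hypothetical values:

    rec = LogRecord('app', INFO, __file__, 42, 'user %s logged in', ('bob',), None)
    print rec.getMessage()  # -> user bob logged in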
+ """ + if not hasattr(types, "UnicodeType"): #if no unicode support... + msg = str(self.msg) + else: + try: + msg = str(self.msg) + except UnicodeError: + msg = self.msg #Defer encoding till later + if self.args: + msg = msg % self.args + return msg + +def makeLogRecord(dict): + """ + Make a LogRecord whose attributes are defined by the specified dictionary, + This function is useful for converting a logging event received over + a socket connection (which is sent as a dictionary) into a LogRecord + instance. + """ + rv = LogRecord(None, None, "", 0, "", (), None) + rv.__dict__.update(dict) + return rv + +#--------------------------------------------------------------------------- +# Formatter classes and functions +#--------------------------------------------------------------------------- + +class Formatter: + """ + Formatter instances are used to convert a LogRecord to text. + + Formatters need to know how a LogRecord is constructed. They are + responsible for converting a LogRecord to (usually) a string which can + be interpreted by either a human or an external system. The base Formatter + allows a formatting string to be specified. If none is supplied, the + default value of "%s(message)\\n" is used. + + The Formatter can be initialized with a format string which makes use of + knowledge of the LogRecord attributes - e.g. the default value mentioned + above makes use of the fact that the user's message and arguments are pre- + formatted into a LogRecord's message attribute. Currently, the useful + attributes in a LogRecord are described by: + + %(name)s Name of the logger (logging channel) + %(levelno)s Numeric logging level for the message (DEBUG, INFO, + WARNING, ERROR, CRITICAL) + %(levelname)s Text logging level for the message ("DEBUG", "INFO", + "WARNING", "ERROR", "CRITICAL") + %(pathname)s Full pathname of the source file where the logging + call was issued (if available) + %(filename)s Filename portion of pathname + %(module)s Module (name portion of filename) + %(lineno)d Source line number where the logging call was issued + (if available) + %(created)f Time when the LogRecord was created (time.time() + return value) + %(asctime)s Textual time when the LogRecord was created + %(msecs)d Millisecond portion of the creation time + %(relativeCreated)d Time in milliseconds when the LogRecord was created, + relative to the time the logging module was loaded + (typically at application startup time) + %(thread)d Thread ID (if available) + %(process)d Process ID (if available) + %(message)s The result of record.getMessage(), computed just as + the record is emitted + """ + + converter = time.localtime + + def __init__(self, fmt=None, datefmt=None): + """ + Initialize the formatter with specified format strings. + + Initialize the formatter either with the specified format string, or a + default as described above. Allow for specialized date formatting with + the optional datefmt argument (if omitted, you get the ISO8601 format). + """ + if fmt: + self._fmt = fmt + else: + self._fmt = "%(message)s" + self.datefmt = datefmt + + def formatTime(self, record, datefmt=None): + """ + Return the creation time of the specified LogRecord as formatted text. + + This method should be called from format() by a formatter which + wants to make use of a formatted time. 
This method can be overridden + in formatters to provide for any specific requirement, but the + basic behaviour is as follows: if datefmt (a string) is specified, + it is used with time.strftime() to format the creation time of the + record. Otherwise, the ISO8601 format is used. The resulting + string is returned. This function uses a user-configurable function + to convert the creation time to a tuple. By default, time.localtime() + is used; to change this for a particular formatter instance, set the + 'converter' attribute to a function with the same signature as + time.localtime() or time.gmtime(). To change it for all formatters, + for example if you want all logging times to be shown in GMT, + set the 'converter' attribute in the Formatter class. + """ + ct = self.converter(record.created) + if datefmt: + s = time.strftime(datefmt, ct) + else: + t = time.strftime("%Y-%m-%d %H:%M:%S", ct) + s = "%s,%03d" % (t, record.msecs) + return s + + def formatException(self, ei): + """ + Format and return the specified exception information as a string. + + This default implementation just uses + traceback.print_exception() + """ + import traceback + sio = cStringIO.StringIO() + traceback.print_exception(ei[0], ei[1], ei[2], None, sio) + s = sio.getvalue() + sio.close() + if s[-1] == "\n": + s = s[:-1] + return s + + def format(self, record): + """ + Format the specified record as text. + + The record's attribute dictionary is used as the operand to a + string formatting operation which yields the returned string. + Before formatting the dictionary, a couple of preparatory steps + are carried out. The message attribute of the record is computed + using LogRecord.getMessage(). If the formatting string contains + "%(asctime)", formatTime() is called to format the event time. + If there is exception information, it is formatted using + formatException() and appended to the message. + """ + record.message = record.getMessage() + if string.find(self._fmt,"%(asctime)") >= 0: + record.asctime = self.formatTime(record, self.datefmt) + s = self._fmt % record.__dict__ + if record.exc_info: + if s[-1] != "\n": + s = s + "\n" + s = s + self.formatException(record.exc_info) + return s + +# +# The default formatter to use when no other is specified +# +_defaultFormatter = Formatter() + +class BufferingFormatter: + """ + A formatter suitable for formatting a number of records. + """ + def __init__(self, linefmt=None): + """ + Optionally specify a formatter which will be used to format each + individual record. + """ + if linefmt: + self.linefmt = linefmt + else: + self.linefmt = _defaultFormatter + + def formatHeader(self, records): + """ + Return the header string for the specified records. + """ + return "" + + def formatFooter(self, records): + """ + Return the footer string for the specified records. + """ + return "" + + def format(self, records): + """ + Format the specified records and return the result as a string. + """ + rv = "" + if len(records) > 0: + rv = rv + self.formatHeader(records) + for record in records: + rv = rv + self.linefmt.format(record) + rv = rv + self.formatFooter(records) + return rv + +#--------------------------------------------------------------------------- +# Filter classes and functions +#--------------------------------------------------------------------------- + +class Filter: + """ + Filter instances are used to perform arbitrary filtering of LogRecords. + + Loggers and Handlers can optionally use Filter instances to filter + records as desired. 
The base filter class only allows events which are + below a certain point in the logger hierarchy. For example, a filter + initialized with "A.B" will allow events logged by loggers "A.B", + "A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If + initialized with the empty string, all events are passed. + """ + def __init__(self, name=''): + """ + Initialize a filter. + + Initialize with the name of the logger which, together with its + children, will have its events allowed through the filter. If no + name is specified, allow every event. + """ + self.name = name + self.nlen = len(name) + + def filter(self, record): + """ + Determine if the specified record is to be logged. + + Is the specified record to be logged? Returns 0 for no, nonzero for + yes. If deemed appropriate, the record may be modified in-place. + """ + if self.nlen == 0: + return 1 + elif self.name == record.name: + return 1 + elif string.find(record.name, self.name, 0, self.nlen) != 0: + return 0 + return (record.name[self.nlen] == ".") + +class Filterer: + """ + A base class for loggers and handlers which allows them to share + common code. + """ + def __init__(self): + """ + Initialize the list of filters to be an empty list. + """ + self.filters = [] + + def addFilter(self, filter): + """ + Add the specified filter to this handler. + """ + if not (filter in self.filters): + self.filters.append(filter) + + def removeFilter(self, filter): + """ + Remove the specified filter from this handler. + """ + if filter in self.filters: + self.filters.remove(filter) + + def filter(self, record): + """ + Determine if a record is loggable by consulting all the filters. + + The default is to allow the record to be logged; any filter can veto + this and the record is then dropped. Returns a zero value if a record + is to be dropped, else non-zero. + """ + rv = 1 + for f in self.filters: + if not f.filter(record): + rv = 0 + break + return rv + +#--------------------------------------------------------------------------- +# Handler classes and functions +#--------------------------------------------------------------------------- + +_handlers = {} #repository of handlers (for flushing when shutdown called) + +class Handler(Filterer): + """ + Handler instances dispatch logging events to specific destinations. + + The base handler class. Acts as a placeholder which defines the Handler + interface. Handlers can optionally use Formatter instances to format + records as desired. By default, no formatter is specified; in this case, + the 'raw' message as determined by record.message is logged. + """ + def __init__(self, level=NOTSET): + """ + Initializes the instance - basically setting the formatter to None + and the filter list to empty. + """ + Filterer.__init__(self) + self.level = level + self.formatter = None + #get the module data lock, as we're updating a shared structure. + _acquireLock() + try: #unlikely to raise an exception, but you never know... + _handlers[self] = 1 + finally: + _releaseLock() + self.createLock() + + def createLock(self): + """ + Acquire a thread lock for serializing access to the underlying I/O. + """ + if thread: + self.lock = thread.allocate_lock() + else: + self.lock = None + + def acquire(self): + """ + Acquire the I/O thread lock. + """ + if self.lock: + self.lock.acquire() + + def release(self): + """ + Release the I/O thread lock. + """ + if self.lock: + self.lock.release() + + def setLevel(self, level): + """ + Set the logging level of this handler. 
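A sketch of typical use; the handler threshold is independent of any logger-level filtering (the stream and level here are chosen arbitrarily):

    h = StreamHandler()
    h.setLevel(WARNING)  # this handler now drops DEBUG and INFO records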
+ """ + self.level = level + + def format(self, record): + """ + Format the specified record. + + If a formatter is set, use it. Otherwise, use the default formatter + for the module. + """ + if self.formatter: + fmt = self.formatter + else: + fmt = _defaultFormatter + return fmt.format(record) + + def emit(self, record): + """ + Do whatever it takes to actually log the specified logging record. + + This version is intended to be implemented by subclasses and so + raises a NotImplementedError. + """ + raise NotImplementedError, 'emit must be implemented '\ + 'by Handler subclasses' + + def handle(self, record): + """ + Conditionally emit the specified logging record. + + Emission depends on filters which may have been added to the handler. + Wrap the actual emission of the record with acquisition/release of + the I/O thread lock. Returns whether the filter passed the record for + emission. + """ + rv = self.filter(record) + if rv: + self.acquire() + try: + self.emit(record) + finally: + self.release() + return rv + + def setFormatter(self, fmt): + """ + Set the formatter for this handler. + """ + self.formatter = fmt + + def flush(self): + """ + Ensure all logging output has been flushed. + + This version does nothing and is intended to be implemented by + subclasses. + """ + pass + + def close(self): + """ + Tidy up any resources used by the handler. + + This version does nothing and is intended to be implemented by + subclasses. + """ + pass + + def handleError(self, record): + """ + Handle errors which occur during an emit() call. + + This method should be called from handlers when an exception is + encountered during an emit() call. If raiseExceptions is false, + exceptions get silently ignored. This is what is mostly wanted + for a logging system - most users will not care about errors in + the logging system, they are more interested in application errors. + You could, however, replace this with a custom handler if you wish. + The record which was being processed is passed in to this method. + """ + if raiseExceptions: + import traceback + ei = sys.exc_info() + traceback.print_exception(ei[0], ei[1], ei[2], None, sys.stderr) + del ei + +class StreamHandler(Handler): + """ + A handler class which writes logging records, appropriately formatted, + to a stream. Note that this class does not close the stream, as + sys.stdout or sys.stderr may be used. + """ + def __init__(self, strm=None): + """ + Initialize the handler. + + If strm is not specified, sys.stderr is used. + """ + Handler.__init__(self) + if not strm: + strm = sys.stderr + self.stream = strm + self.formatter = None + + def flush(self): + """ + Flushes the stream. + """ + self.stream.flush() + + def emit(self, record): + """ + Emit a record. + + If a formatter is specified, it is used to format the record. + The record is then written to the stream with a trailing newline + [N.B. this may be removed depending on feedback]. If exception + information is present, it is formatted using + traceback.print_exception and appended to the stream. + """ + try: + msg = self.format(record) + if not hasattr(types, "UnicodeType"): #if no unicode support... + self.stream.write("%s\n" % msg) + else: + try: + self.stream.write("%s\n" % msg) + except UnicodeError: + self.stream.write("%s\n" % msg.encode("UTF-8")) + self.flush() + except: + self.handleError(record) + +class FileHandler(StreamHandler): + """ + A handler class which writes formatted logging records to disk files. 
+ """ + def __init__(self, filename, mode="a"): + """ + Open the specified file and use it as the stream for logging. + """ + StreamHandler.__init__(self, open(filename, mode)) + self.baseFilename = filename + self.mode = mode + + def close(self): + """ + Closes the stream. + """ + self.stream.close() + +#--------------------------------------------------------------------------- +# Manager classes and functions +#--------------------------------------------------------------------------- + +class PlaceHolder: + """ + PlaceHolder instances are used in the Manager logger hierarchy to take + the place of nodes for which no loggers have been defined [FIXME add + example]. + """ + def __init__(self, alogger): + """ + Initialize with the specified logger being a child of this placeholder. + """ + self.loggers = [alogger] + + def append(self, alogger): + """ + Add the specified logger as a child of this placeholder. + """ + if alogger not in self.loggers: + self.loggers.append(alogger) + +# +# Determine which class to use when instantiating loggers. +# +_loggerClass = None + +def setLoggerClass(klass): + """ + Set the class to be used when instantiating a logger. The class should + define __init__() such that only a name argument is required, and the + __init__() should call Logger.__init__() + """ + if klass != Logger: + if not issubclass(klass, Logger): + raise TypeError, "logger not derived from logging.Logger: " + \ + klass.__name__ + global _loggerClass + _loggerClass = klass + +class Manager: + """ + There is [under normal circumstances] just one Manager instance, which + holds the hierarchy of loggers. + """ + def __init__(self, rootnode): + """ + Initialize the manager with the root node of the logger hierarchy. + """ + self.root = rootnode + self.disable = 0 + self.emittedNoHandlerWarning = 0 + self.loggerDict = {} + + def getLogger(self, name): + """ + Get a logger with the specified name (channel name), creating it + if it doesn't yet exist. + + If a PlaceHolder existed for the specified name [i.e. the logger + didn't exist but a child of it did], replace it with the created + logger and fix up the parent/child references which pointed to the + placeholder to now point to the logger. + """ + rv = None + _acquireLock() + try: + if self.loggerDict.has_key(name): + rv = self.loggerDict[name] + if isinstance(rv, PlaceHolder): + ph = rv + rv = _loggerClass(name) + rv.manager = self + self.loggerDict[name] = rv + self._fixupChildren(ph, rv) + self._fixupParents(rv) + else: + rv = _loggerClass(name) + rv.manager = self + self.loggerDict[name] = rv + self._fixupParents(rv) + finally: + _releaseLock() + return rv + + def _fixupParents(self, alogger): + """ + Ensure that there are either loggers or placeholders all the way + from the specified logger to the root of the logger hierarchy. + """ + name = alogger.name + i = string.rfind(name, ".") + rv = None + while (i > 0) and not rv: + substr = name[:i] + if not self.loggerDict.has_key(substr): + self.loggerDict[substr] = PlaceHolder(alogger) + else: + obj = self.loggerDict[substr] + if isinstance(obj, Logger): + rv = obj + else: + assert isinstance(obj, PlaceHolder) + obj.append(alogger) + i = string.rfind(name, ".", 0, i - 1) + if not rv: + rv = self.root + alogger.parent = rv + + def _fixupChildren(self, ph, alogger): + """ + Ensure that children of the placeholder ph are connected to the + specified logger. 
+ """ + for c in ph.loggers: + if string.find(c.parent.name, alogger.name) <> 0: + alogger.parent = c.parent + c.parent = alogger + +#--------------------------------------------------------------------------- +# Logger classes and functions +#--------------------------------------------------------------------------- + +class Logger(Filterer): + """ + Instances of the Logger class represent a single logging channel. A + "logging channel" indicates an area of an application. Exactly how an + "area" is defined is up to the application developer. Since an + application can have any number of areas, logging channels are identified + by a unique string. Application areas can be nested (e.g. an area + of "input processing" might include sub-areas "read CSV files", "read + XLS files" and "read Gnumeric files"). To cater for this natural nesting, + channel names are organized into a namespace hierarchy where levels are + separated by periods, much like the Java or Python package namespace. So + in the instance given above, channel names might be "input" for the upper + level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels. + There is no arbitrary limit to the depth of nesting. + """ + def __init__(self, name, level=NOTSET): + """ + Initialize the logger with a name and an optional level. + """ + Filterer.__init__(self) + self.name = name + self.level = level + self.parent = None + self.propagate = 1 + self.handlers = [] + self.disabled = 0 + + def setLevel(self, level): + """ + Set the logging level of this logger. + """ + self.level = level + +# def getRoot(self): +# """ +# Get the root of the logger hierarchy. +# """ +# return Logger.root + + def debug(self, msg, *args, **kwargs): + """ + Log 'msg % args' with severity 'DEBUG'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.debug("Houston, we have a %s", "thorny problem", exc_info=1) + """ + if self.manager.disable >= DEBUG: + return + if DEBUG >= self.getEffectiveLevel(): + apply(self._log, (DEBUG, msg, args), kwargs) + + def info(self, msg, *args, **kwargs): + """ + Log 'msg % args' with severity 'INFO'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.info("Houston, we have a %s", "interesting problem", exc_info=1) + """ + if self.manager.disable >= INFO: + return + if INFO >= self.getEffectiveLevel(): + apply(self._log, (INFO, msg, args), kwargs) + + def warning(self, msg, *args, **kwargs): + """ + Log 'msg % args' with severity 'WARNING'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1) + """ + if self.manager.disable >= WARNING: + return + if self.isEnabledFor(WARNING): + apply(self._log, (WARNING, msg, args), kwargs) + + warn = warning + + def error(self, msg, *args, **kwargs): + """ + Log 'msg % args' with severity 'ERROR'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.error("Houston, we have a %s", "major problem", exc_info=1) + """ + if self.manager.disable >= ERROR: + return + if self.isEnabledFor(ERROR): + apply(self._log, (ERROR, msg, args), kwargs) + + def exception(self, msg, *args): + """ + Convenience method for logging an ERROR with exception information. + """ + apply(self.error, (msg,) + args, {'exc_info': 1}) + + def critical(self, msg, *args, **kwargs): + """ + Log 'msg % args' with severity 'CRITICAL'. 
+ + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.critical("Houston, we have a %s", "major disaster", exc_info=1) + """ + if self.manager.disable >= CRITICAL: + return + if CRITICAL >= self.getEffectiveLevel(): + apply(self._log, (CRITICAL, msg, args), kwargs) + + fatal = critical + + def log(self, level, msg, *args, **kwargs): + """ + Log 'msg % args' with the severity 'level'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.log(level, "We have a %s", "mysterious problem", exc_info=1) + """ + if self.manager.disable >= level: + return + if self.isEnabledFor(level): + apply(self._log, (level, msg, args), kwargs) + + def findCaller(self): + """ + Find the stack frame of the caller so that we can note the source + file name and line number. + """ + f = sys._getframe(1) + while 1: + co = f.f_code + filename = os.path.normcase(co.co_filename) + if filename == _srcfile: + f = f.f_back + continue + return filename, f.f_lineno + + def makeRecord(self, name, level, fn, lno, msg, args, exc_info): + """ + A factory method which can be overridden in subclasses to create + specialized LogRecords. + """ + return LogRecord(name, level, fn, lno, msg, args, exc_info) + + def _log(self, level, msg, args, exc_info=None): + """ + Low-level logging routine which creates a LogRecord and then calls + all the handlers of this logger to handle the record. + """ + if _srcfile: + fn, lno = self.findCaller() + else: + fn, lno = "", 0 + if exc_info: + exc_info = sys.exc_info() + record = self.makeRecord(self.name, level, fn, lno, msg, args, exc_info) + self.handle(record) + + def handle(self, record): + """ + Call the handlers for the specified record. + + This method is used for unpickled records received from a socket, as + well as those created locally. Logger-level filtering is applied. + """ + if (not self.disabled) and self.filter(record): + self.callHandlers(record) + + def addHandler(self, hdlr): + """ + Add the specified handler to this logger. + """ + if not (hdlr in self.handlers): + self.handlers.append(hdlr) + + def removeHandler(self, hdlr): + """ + Remove the specified handler from this logger. + """ + if hdlr in self.handlers: + #hdlr.close() + self.handlers.remove(hdlr) + + def callHandlers(self, record): + """ + Pass a record to all relevant handlers. + + Loop through all handlers for this logger and its parents in the + logger hierarchy. If no handler was found, output a one-off error + message to sys.stderr. Stop searching up the hierarchy whenever a + logger with the "propagate" attribute set to zero is found - that + will be the last logger whose handlers are called. + """ + c = self + found = 0 + while c: + for hdlr in c.handlers: + found = found + 1 + if record.levelno >= hdlr.level: + hdlr.handle(record) + if not c.propagate: + c = None #break out + else: + c = c.parent + if (found == 0) and not self.manager.emittedNoHandlerWarning: + sys.stderr.write("No handlers could be found for logger" + " \"%s\"\n" % self.name) + self.manager.emittedNoHandlerWarning = 1 + + def getEffectiveLevel(self): + """ + Get the effective level for this logger. + + Loop through this logger and its parents in the logger hierarchy, + looking for a non-zero logging level. Return the first one found. + """ + logger = self + while logger: + if logger.level: + return logger.level + logger = logger.parent + return NOTSET + + def isEnabledFor(self, level): + """ + Is this logger enabled for level 'level'? 
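How getEffectiveLevel() and isEnabledFor() interact with the hierarchy; a minimal sketch:

import logging

parent = logging.getLogger('app')
child = logging.getLogger('app.db')

parent.setLevel(logging.ERROR)
# child.level is NOTSET (0), so the search walks up and finds "app":
assert child.getEffectiveLevel() == logging.ERROR
assert not child.isEnabledFor(logging.WARNING)

child.setLevel(logging.DEBUG)                # now the child answers for itself
assert child.getEffectiveLevel() == logging.DEBUG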
+ """ + if self.manager.disable >= level: + return 0 + return level >= self.getEffectiveLevel() + +class RootLogger(Logger): + """ + A root logger is not that different to any other logger, except that + it must have a logging level and there is only one instance of it in + the hierarchy. + """ + def __init__(self, level): + """ + Initialize the logger with the name "root". + """ + Logger.__init__(self, "root", level) + +_loggerClass = Logger + +root = RootLogger(WARNING) +Logger.root = root +Logger.manager = Manager(Logger.root) + +#--------------------------------------------------------------------------- +# Configuration classes and functions +#--------------------------------------------------------------------------- + +BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s" + +def basicConfig(): + """ + Do basic configuration for the logging system by creating a + StreamHandler with a default Formatter and adding it to the + root logger. + """ + if len(root.handlers) == 0: + hdlr = StreamHandler() + fmt = Formatter(BASIC_FORMAT) + hdlr.setFormatter(fmt) + root.addHandler(hdlr) + +#--------------------------------------------------------------------------- +# Utility functions at module level. +# Basically delegate everything to the root logger. +#--------------------------------------------------------------------------- + +def getLogger(name=None): + """ + Return a logger with the specified name, creating it if necessary. + + If no name is specified, return the root logger. + """ + if name: + return Logger.manager.getLogger(name) + else: + return root + +#def getRootLogger(): +# """ +# Return the root logger. +# +# Note that getLogger('') now does the same thing, so this function is +# deprecated and may disappear in the future. +# """ +# return root + +def critical(msg, *args, **kwargs): + """ + Log a message with severity 'CRITICAL' on the root logger. + """ + if len(root.handlers) == 0: + basicConfig() + apply(root.critical, (msg,)+args, kwargs) + +fatal = critical + +def error(msg, *args, **kwargs): + """ + Log a message with severity 'ERROR' on the root logger. + """ + if len(root.handlers) == 0: + basicConfig() + apply(root.error, (msg,)+args, kwargs) + +def exception(msg, *args): + """ + Log a message with severity 'ERROR' on the root logger, + with exception information. + """ + apply(error, (msg,)+args, {'exc_info': 1}) + +def warning(msg, *args, **kwargs): + """ + Log a message with severity 'WARNING' on the root logger. + """ + if len(root.handlers) == 0: + basicConfig() + apply(root.warning, (msg,)+args, kwargs) + +warn = warning + +def info(msg, *args, **kwargs): + """ + Log a message with severity 'INFO' on the root logger. + """ + if len(root.handlers) == 0: + basicConfig() + apply(root.info, (msg,)+args, kwargs) + +def debug(msg, *args, **kwargs): + """ + Log a message with severity 'DEBUG' on the root logger. + """ + if len(root.handlers) == 0: + basicConfig() + apply(root.debug, (msg,)+args, kwargs) + +def disable(level): + """ + Disable all logging calls less severe than 'level'. + """ + root.manager.disable = level + +def shutdown(): + """ + Perform any cleanup actions in the logging system (e.g. flushing + buffers). + + Should be called at application exit. 
+ """ + for h in _handlers.keys(): + h.flush() + h.close() diff --git a/DJAGEN/branches/oguz/djagen/gezegen/planet/compat_logging/config.py b/DJAGEN/branches/oguz/djagen/gezegen/planet/compat_logging/config.py new file mode 100755 index 0000000..d4d08f0 --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/gezegen/planet/compat_logging/config.py @@ -0,0 +1,299 @@ +# Copyright 2001-2002 by Vinay Sajip. All Rights Reserved. +# +# Permission to use, copy, modify, and distribute this software and its +# documentation for any purpose and without fee is hereby granted, +# provided that the above copyright notice appear in all copies and that +# both that copyright notice and this permission notice appear in +# supporting documentation, and that the name of Vinay Sajip +# not be used in advertising or publicity pertaining to distribution +# of the software without specific, written prior permission. +# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL +# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER +# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +""" +Logging package for Python. Based on PEP 282 and comments thereto in +comp.lang.python, and influenced by Apache's log4j system. + +Should work under Python versions >= 1.5.2, except that source line +information is not available unless 'inspect' is. + +Copyright (C) 2001-2002 Vinay Sajip. All Rights Reserved. + +To use, simply 'import logging' and log away! +""" + +import sys, logging, logging.handlers, string, thread, threading, socket, struct, os + +from SocketServer import ThreadingTCPServer, StreamRequestHandler + + +DEFAULT_LOGGING_CONFIG_PORT = 9030 +if sys.platform == "win32": + RESET_ERROR = 10054 #WSAECONNRESET +else: + RESET_ERROR = 104 #ECONNRESET + +# +# The following code implements a socket listener for on-the-fly +# reconfiguration of logging. +# +# _listener holds the server object doing the listening +_listener = None + +def fileConfig(fname, defaults=None): + """ + Read the logging configuration from a ConfigParser-format file. + + This can be called several times from an application, allowing an end user + the ability to select from various pre-canned configurations (if the + developer provides a mechanism to present the choices and load the chosen + configuration). + In versions of ConfigParser which have the readfp method [typically + shipped in 2.x versions of Python], you can pass in a file-like object + rather than a filename, in which case the file-like object will be read + using readfp. + """ + import ConfigParser + + cp = ConfigParser.ConfigParser(defaults) + if hasattr(cp, 'readfp') and hasattr(fname, 'readline'): + cp.readfp(fname) + else: + cp.read(fname) + #first, do the formatters... + flist = cp.get("formatters", "keys") + if len(flist): + flist = string.split(flist, ",") + formatters = {} + for form in flist: + sectname = "formatter_%s" % form + opts = cp.options(sectname) + if "format" in opts: + fs = cp.get(sectname, "format", 1) + else: + fs = None + if "datefmt" in opts: + dfs = cp.get(sectname, "datefmt", 1) + else: + dfs = None + f = logging.Formatter(fs, dfs) + formatters[form] = f + #next, do the handlers... + #critical section... 
+ logging._acquireLock() + try: + try: + #first, lose the existing handlers... + logging._handlers.clear() + #now set up the new ones... + hlist = cp.get("handlers", "keys") + if len(hlist): + hlist = string.split(hlist, ",") + handlers = {} + fixups = [] #for inter-handler references + for hand in hlist: + sectname = "handler_%s" % hand + klass = cp.get(sectname, "class") + opts = cp.options(sectname) + if "formatter" in opts: + fmt = cp.get(sectname, "formatter") + else: + fmt = "" + klass = eval(klass, vars(logging)) + args = cp.get(sectname, "args") + args = eval(args, vars(logging)) + h = apply(klass, args) + if "level" in opts: + level = cp.get(sectname, "level") + h.setLevel(logging._levelNames[level]) + if len(fmt): + h.setFormatter(formatters[fmt]) + #temporary hack for FileHandler and MemoryHandler. + if klass == logging.handlers.MemoryHandler: + if "target" in opts: + target = cp.get(sectname,"target") + else: + target = "" + if len(target): #the target handler may not be loaded yet, so keep for later... + fixups.append((h, target)) + handlers[hand] = h + #now all handlers are loaded, fixup inter-handler references... + for fixup in fixups: + h = fixup[0] + t = fixup[1] + h.setTarget(handlers[t]) + #at last, the loggers...first the root... + llist = cp.get("loggers", "keys") + llist = string.split(llist, ",") + llist.remove("root") + sectname = "logger_root" + root = logging.root + log = root + opts = cp.options(sectname) + if "level" in opts: + level = cp.get(sectname, "level") + log.setLevel(logging._levelNames[level]) + for h in root.handlers[:]: + root.removeHandler(h) + hlist = cp.get(sectname, "handlers") + if len(hlist): + hlist = string.split(hlist, ",") + for hand in hlist: + log.addHandler(handlers[hand]) + #and now the others... + #we don't want to lose the existing loggers, + #since other threads may have pointers to them. + #existing is set to contain all existing loggers, + #and as we go through the new configuration we + #remove any which are configured. At the end, + #what's left in existing is the set of loggers + #which were in the previous configuration but + #which are not in the new configuration. + existing = root.manager.loggerDict.keys() + #now set up the new ones... + for log in llist: + sectname = "logger_%s" % log + qn = cp.get(sectname, "qualname") + opts = cp.options(sectname) + if "propagate" in opts: + propagate = cp.getint(sectname, "propagate") + else: + propagate = 1 + logger = logging.getLogger(qn) + if qn in existing: + existing.remove(qn) + if "level" in opts: + level = cp.get(sectname, "level") + logger.setLevel(logging._levelNames[level]) + for h in logger.handlers[:]: + logger.removeHandler(h) + logger.propagate = propagate + logger.disabled = 0 + hlist = cp.get(sectname, "handlers") + if len(hlist): + hlist = string.split(hlist, ",") + for hand in hlist: + logger.addHandler(handlers[hand]) + #Disable any old loggers. There's no point deleting + #them as other threads may continue to hold references + #and by disabling them, you stop them doing any logging. + for log in existing: + root.manager.loggerDict[log].disabled = 1 + except: + import traceback + ei = sys.exc_info() + traceback.print_exception(ei[0], ei[1], ei[2], None, sys.stderr) + del ei + finally: + logging._releaseLock() + +def listen(port=DEFAULT_LOGGING_CONFIG_PORT): + """ + Start up a socket server on the specified port, and listen for new + configurations. + + These will be sent as a file suitable for processing by fileConfig(). 
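A sketch of driving listen() from a client, assuming the package is importable as `logging` and reusing the ini file sketched above; the wire format (a 4-byte big-endian length, then the config payload) matches the ConfigStreamHandler defined below:

import socket
import struct
import time
import logging.config

t = logging.config.listen(9030)              # DEFAULT_LOGGING_CONFIG_PORT
t.start()
time.sleep(1)                                # give the listener a moment to bind

conf = open('logging.ini').read()            # hypothetical file name
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('localhost', 9030))
s.send(struct.pack('>L', len(conf)))
s.send(conf)
s.close()

logging.config.stopListening()
t.join()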
+ Returns a Thread object on which you can call start() to start the server, + and which you can join() when appropriate. To stop the server, call + stopListening(). + """ + if not thread: + raise NotImplementedError, "listen() needs threading to work" + + class ConfigStreamHandler(StreamRequestHandler): + """ + Handler for a logging configuration request. + + It expects a completely new logging configuration and uses fileConfig + to install it. + """ + def handle(self): + """ + Handle a request. + + Each request is expected to be a 4-byte length, + followed by the config file. Uses fileConfig() to do the + grunt work. + """ + import tempfile + try: + conn = self.connection + chunk = conn.recv(4) + if len(chunk) == 4: + slen = struct.unpack(">L", chunk)[0] + chunk = self.connection.recv(slen) + while len(chunk) < slen: + chunk = chunk + conn.recv(slen - len(chunk)) + #Apply new configuration. We'd like to be able to + #create a StringIO and pass that in, but unfortunately + #1.5.2 ConfigParser does not support reading file + #objects, only actual files. So we create a temporary + #file and remove it later. + file = tempfile.mktemp(".ini") + f = open(file, "w") + f.write(chunk) + f.close() + fileConfig(file) + os.remove(file) + except socket.error, e: + if type(e.args) != types.TupleType: + raise + else: + errcode = e.args[0] + if errcode != RESET_ERROR: + raise + + class ConfigSocketReceiver(ThreadingTCPServer): + """ + A simple TCP socket-based logging config receiver. + """ + + allow_reuse_address = 1 + + def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT, + handler=None): + ThreadingTCPServer.__init__(self, (host, port), handler) + logging._acquireLock() + self.abort = 0 + logging._releaseLock() + self.timeout = 1 + + def serve_until_stopped(self): + import select + abort = 0 + while not abort: + rd, wr, ex = select.select([self.socket.fileno()], + [], [], + self.timeout) + if rd: + self.handle_request() + logging._acquireLock() + abort = self.abort + logging._releaseLock() + + def serve(rcvr, hdlr, port): + server = rcvr(port=port, handler=hdlr) + global _listener + logging._acquireLock() + _listener = server + logging._releaseLock() + server.serve_until_stopped() + + return threading.Thread(target=serve, + args=(ConfigSocketReceiver, + ConfigStreamHandler, port)) + +def stopListening(): + """ + Stop the listening server which was created with a call to listen(). + """ + global _listener + if _listener: + logging._acquireLock() + _listener.abort = 1 + _listener = None + logging._releaseLock() diff --git a/DJAGEN/branches/oguz/djagen/gezegen/planet/compat_logging/handlers.py b/DJAGEN/branches/oguz/djagen/gezegen/planet/compat_logging/handlers.py new file mode 100755 index 0000000..26ca8ad --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/gezegen/planet/compat_logging/handlers.py @@ -0,0 +1,728 @@ +# Copyright 2001-2002 by Vinay Sajip. All Rights Reserved. +# +# Permission to use, copy, modify, and distribute this software and its +# documentation for any purpose and without fee is hereby granted, +# provided that the above copyright notice appear in all copies and that +# both that copyright notice and this permission notice appear in +# supporting documentation, and that the name of Vinay Sajip +# not be used in advertising or publicity pertaining to distribution +# of the software without specific, written prior permission. +# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL +# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER +# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +""" +Logging package for Python. Based on PEP 282 and comments thereto in +comp.lang.python, and influenced by Apache's log4j system. + +Should work under Python versions >= 1.5.2, except that source line +information is not available unless 'inspect' is. + +Copyright (C) 2001-2002 Vinay Sajip. All Rights Reserved. + +To use, simply 'import logging' and log away! +""" + +import sys, logging, socket, types, os, string, cPickle, struct, time + +from SocketServer import ThreadingTCPServer, StreamRequestHandler + +# +# Some constants... +# + +DEFAULT_TCP_LOGGING_PORT = 9020 +DEFAULT_UDP_LOGGING_PORT = 9021 +DEFAULT_HTTP_LOGGING_PORT = 9022 +DEFAULT_SOAP_LOGGING_PORT = 9023 +SYSLOG_UDP_PORT = 514 + + +class RotatingFileHandler(logging.FileHandler): + def __init__(self, filename, mode="a", maxBytes=0, backupCount=0): + """ + Open the specified file and use it as the stream for logging. + + By default, the file grows indefinitely. You can specify particular + values of maxBytes and backupCount to allow the file to rollover at + a predetermined size. + + Rollover occurs whenever the current log file is nearly maxBytes in + length. If backupCount is >= 1, the system will successively create + new files with the same pathname as the base file, but with extensions + ".1", ".2" etc. appended to it. For example, with a backupCount of 5 + and a base file name of "app.log", you would get "app.log", + "app.log.1", "app.log.2", ... through to "app.log.5". The file being + written to is always "app.log" - when it gets filled up, it is closed + and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc. + exist, then they are renamed to "app.log.2", "app.log.3" etc. + respectively. + + If maxBytes is zero, rollover never occurs. + """ + logging.FileHandler.__init__(self, filename, mode) + self.maxBytes = maxBytes + self.backupCount = backupCount + if maxBytes > 0: + self.mode = "a" + + def doRollover(self): + """ + Do a rollover, as described in __init__(). + """ + + self.stream.close() + if self.backupCount > 0: + for i in range(self.backupCount - 1, 0, -1): + sfn = "%s.%d" % (self.baseFilename, i) + dfn = "%s.%d" % (self.baseFilename, i + 1) + if os.path.exists(sfn): + #print "%s -> %s" % (sfn, dfn) + if os.path.exists(dfn): + os.remove(dfn) + os.rename(sfn, dfn) + dfn = self.baseFilename + ".1" + if os.path.exists(dfn): + os.remove(dfn) + os.rename(self.baseFilename, dfn) + #print "%s -> %s" % (self.baseFilename, dfn) + self.stream = open(self.baseFilename, "w") + + def emit(self, record): + """ + Emit a record. + + Output the record to the file, catering for rollover as described + in doRollover(). + """ + if self.maxBytes > 0: # are we rolling over? + msg = "%s\n" % self.format(record) + self.stream.seek(0, 2) #due to non-posix-compliant Windows feature + if self.stream.tell() + len(msg) >= self.maxBytes: + self.doRollover() + logging.FileHandler.emit(self, record) + + +class SocketHandler(logging.Handler): + """ + A handler class which writes logging records, in pickle format, to + a streaming socket. The socket is kept open across logging calls. + If the peer resets it, an attempt is made to reconnect on the next call. 
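Typical use of the rollover parameters described below; a minimal sketch assuming this file is importable as `logging.handlers`:

import logging
import logging.handlers

h = logging.handlers.RotatingFileHandler('app.log', maxBytes=50000, backupCount=3)
log = logging.getLogger('app')
log.addHandler(h)
# near the 50000-byte mark app.log becomes app.log.1 (older backups shift to
# app.log.2 and app.log.3) and a fresh app.log is opened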
+ The pickle which is sent is that of the LogRecord's attribute dictionary + (__dict__), so that the receiver does not need to have the logging module + installed in order to process the logging event. + + To unpickle the record at the receiving end into a LogRecord, use the + makeLogRecord function. + """ + + def __init__(self, host, port): + """ + Initializes the handler with a specific host address and port. + + The attribute 'closeOnError' is set to 1 - which means that if + a socket error occurs, the socket is silently closed and then + reopened on the next logging call. + """ + logging.Handler.__init__(self) + self.host = host + self.port = port + self.sock = None + self.closeOnError = 0 + + def makeSocket(self): + """ + A factory method which allows subclasses to define the precise + type of socket they want. + """ + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.connect((self.host, self.port)) + return s + + def send(self, s): + """ + Send a pickled string to the socket. + + This function allows for partial sends which can happen when the + network is busy. + """ + if hasattr(self.sock, "sendall"): + self.sock.sendall(s) + else: + sentsofar = 0 + left = len(s) + while left > 0: + sent = self.sock.send(s[sentsofar:]) + sentsofar = sentsofar + sent + left = left - sent + + def makePickle(self, record): + """ + Pickles the record in binary format with a length prefix, and + returns it ready for transmission across the socket. + """ + s = cPickle.dumps(record.__dict__, 1) + #n = len(s) + #slen = "%c%c" % ((n >> 8) & 0xFF, n & 0xFF) + slen = struct.pack(">L", len(s)) + return slen + s + + def handleError(self, record): + """ + Handle an error during logging. + + An error has occurred during logging. Most likely cause - + connection lost. Close the socket so that we can retry on the + next event. + """ + if self.closeOnError and self.sock: + self.sock.close() + self.sock = None #try to reconnect next time + else: + logging.Handler.handleError(self, record) + + def emit(self, record): + """ + Emit a record. + + Pickles the record and writes it to the socket in binary format. + If there is an error with the socket, silently drop the packet. + If there was a problem with the socket, re-establishes the + socket. + """ + try: + s = self.makePickle(record) + if not self.sock: + self.sock = self.makeSocket() + self.send(s) + except: + self.handleError(record) + + def close(self): + """ + Closes the socket. + """ + if self.sock: + self.sock.close() + self.sock = None + +class DatagramHandler(SocketHandler): + """ + A handler class which writes logging records, in pickle format, to + a datagram socket. The pickle which is sent is that of the LogRecord's + attribute dictionary (__dict__), so that the receiver does not need to + have the logging module installed in order to process the logging event. + + To unpickle the record at the receiving end into a LogRecord, use the + makeLogRecord function. + + """ + def __init__(self, host, port): + """ + Initializes the handler with a specific host address and port. + """ + SocketHandler.__init__(self, host, port) + self.closeOnError = 0 + + def makeSocket(self): + """ + The factory method of SocketHandler is here overridden to create + a UDP socket (SOCK_DGRAM). + """ + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + return s + + def send(self, s): + """ + Send a pickled string to a socket. 
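The receiving side of SocketHandler's wire format (a 4-byte big-endian length, then the pickled LogRecord __dict__); a minimal single-connection sketch, assuming the companion logging module supplies makeLogRecord as the docstring above says:

import socket
import struct
import cPickle
import logging

srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind(('localhost', 9020))                # DEFAULT_TCP_LOGGING_PORT
srv.listen(1)
conn, addr = srv.accept()
slen = struct.unpack('>L', conn.recv(4))[0]
data = conn.recv(slen)
while len(data) < slen:                      # recv() may return short reads
    data = data + conn.recv(slen - len(data))
record = logging.makeLogRecord(cPickle.loads(data))
logging.getLogger(record.name).handle(record)   # re-dispatch locally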
+ + This function no longer allows for partial sends which can happen + when the network is busy - UDP does not guarantee delivery and + can deliver packets out of sequence. + """ + self.sock.sendto(s, (self.host, self.port)) + +class SysLogHandler(logging.Handler): + """ + A handler class which sends formatted logging records to a syslog + server. Based on Sam Rushing's syslog module: + http://www.nightmare.com/squirl/python-ext/misc/syslog.py + Contributed by Nicolas Untz (after which minor refactoring changes + have been made). + """ + + # from : + # ====================================================================== + # priorities/facilities are encoded into a single 32-bit quantity, where + # the bottom 3 bits are the priority (0-7) and the top 28 bits are the + # facility (0-big number). Both the priorities and the facilities map + # roughly one-to-one to strings in the syslogd(8) source code. This + # mapping is included in this file. + # + # priorities (these are ordered) + + LOG_EMERG = 0 # system is unusable + LOG_ALERT = 1 # action must be taken immediately + LOG_CRIT = 2 # critical conditions + LOG_ERR = 3 # error conditions + LOG_WARNING = 4 # warning conditions + LOG_NOTICE = 5 # normal but significant condition + LOG_INFO = 6 # informational + LOG_DEBUG = 7 # debug-level messages + + # facility codes + LOG_KERN = 0 # kernel messages + LOG_USER = 1 # random user-level messages + LOG_MAIL = 2 # mail system + LOG_DAEMON = 3 # system daemons + LOG_AUTH = 4 # security/authorization messages + LOG_SYSLOG = 5 # messages generated internally by syslogd + LOG_LPR = 6 # line printer subsystem + LOG_NEWS = 7 # network news subsystem + LOG_UUCP = 8 # UUCP subsystem + LOG_CRON = 9 # clock daemon + LOG_AUTHPRIV = 10 # security/authorization messages (private) + + # other codes through 15 reserved for system use + LOG_LOCAL0 = 16 # reserved for local use + LOG_LOCAL1 = 17 # reserved for local use + LOG_LOCAL2 = 18 # reserved for local use + LOG_LOCAL3 = 19 # reserved for local use + LOG_LOCAL4 = 20 # reserved for local use + LOG_LOCAL5 = 21 # reserved for local use + LOG_LOCAL6 = 22 # reserved for local use + LOG_LOCAL7 = 23 # reserved for local use + + priority_names = { + "alert": LOG_ALERT, + "crit": LOG_CRIT, + "critical": LOG_CRIT, + "debug": LOG_DEBUG, + "emerg": LOG_EMERG, + "err": LOG_ERR, + "error": LOG_ERR, # DEPRECATED + "info": LOG_INFO, + "notice": LOG_NOTICE, + "panic": LOG_EMERG, # DEPRECATED + "warn": LOG_WARNING, # DEPRECATED + "warning": LOG_WARNING, + } + + facility_names = { + "auth": LOG_AUTH, + "authpriv": LOG_AUTHPRIV, + "cron": LOG_CRON, + "daemon": LOG_DAEMON, + "kern": LOG_KERN, + "lpr": LOG_LPR, + "mail": LOG_MAIL, + "news": LOG_NEWS, + "security": LOG_AUTH, # DEPRECATED + "syslog": LOG_SYSLOG, + "user": LOG_USER, + "uucp": LOG_UUCP, + "local0": LOG_LOCAL0, + "local1": LOG_LOCAL1, + "local2": LOG_LOCAL2, + "local3": LOG_LOCAL3, + "local4": LOG_LOCAL4, + "local5": LOG_LOCAL5, + "local6": LOG_LOCAL6, + "local7": LOG_LOCAL7, + } + + def __init__(self, address=('localhost', SYSLOG_UDP_PORT), facility=LOG_USER): + """ + Initialize a handler. + + If address is specified as a string, UNIX socket is used. + If facility is not specified, LOG_USER is used. 
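The facility/priority packing described in the comment above is simply (facility << 3) | priority; for example:

import logging.handlers

h = logging.handlers.SysLogHandler(address=('localhost', 514))  # SYSLOG_UDP_PORT
# "user" facility (1) and "warning" priority (4): (1 << 3) | 4 == 12,
# which ends up as the <12> prefix of the emitted syslog packet
assert h.encodePriority('user', 'warning') == 12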
+ """ + logging.Handler.__init__(self) + + self.address = address + self.facility = facility + if type(address) == types.StringType: + self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) + # syslog may require either DGRAM or STREAM sockets + try: + self.socket.connect(address) + except socket.error: + self.socket.close() + self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + self.socket.connect(address) + self.unixsocket = 1 + else: + self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + self.unixsocket = 0 + + self.formatter = None + + # curious: when talking to the unix-domain '/dev/log' socket, a + # zero-terminator seems to be required. this string is placed + # into a class variable so that it can be overridden if + # necessary. + log_format_string = '<%d>%s\000' + + def encodePriority (self, facility, priority): + """ + Encode the facility and priority. You can pass in strings or + integers - if strings are passed, the facility_names and + priority_names mapping dictionaries are used to convert them to + integers. + """ + if type(facility) == types.StringType: + facility = self.facility_names[facility] + if type(priority) == types.StringType: + priority = self.priority_names[priority] + return (facility << 3) | priority + + def close (self): + """ + Closes the socket. + """ + if self.unixsocket: + self.socket.close() + + def emit(self, record): + """ + Emit a record. + + The record is formatted, and then sent to the syslog server. If + exception information is present, it is NOT sent to the server. + """ + msg = self.format(record) + """ + We need to convert record level to lowercase, maybe this will + change in the future. + """ + msg = self.log_format_string % ( + self.encodePriority(self.facility, + string.lower(record.levelname)), + msg) + try: + if self.unixsocket: + self.socket.send(msg) + else: + self.socket.sendto(msg, self.address) + except: + self.handleError(record) + +class SMTPHandler(logging.Handler): + """ + A handler class which sends an SMTP email for each logging event. + """ + def __init__(self, mailhost, fromaddr, toaddrs, subject): + """ + Initialize the handler. + + Initialize the instance with the from and to addresses and subject + line of the email. To specify a non-standard SMTP port, use the + (host, port) tuple format for the mailhost argument. + """ + logging.Handler.__init__(self) + if type(mailhost) == types.TupleType: + host, port = mailhost + self.mailhost = host + self.mailport = port + else: + self.mailhost = mailhost + self.mailport = None + self.fromaddr = fromaddr + if type(toaddrs) == types.StringType: + toaddrs = [toaddrs] + self.toaddrs = toaddrs + self.subject = subject + + def getSubject(self, record): + """ + Determine the subject for the email. + + If you want to specify a subject line which is record-dependent, + override this method. + """ + return self.subject + + weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] + + monthname = [None, + 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', + 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] + + def date_time(self): + """Return the current date and time formatted for a MIME header.""" + year, month, day, hh, mm, ss, wd, y, z = time.gmtime(time.time()) + s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % ( + self.weekdayname[wd], + day, self.monthname[month], year, + hh, mm, ss) + return s + + def emit(self, record): + """ + Emit a record. + + Format the record and send it to the specified addressees. 
+ """ + try: + import smtplib + port = self.mailport + if not port: + port = smtplib.SMTP_PORT + smtp = smtplib.SMTP(self.mailhost, port) + msg = self.format(record) + msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % ( + self.fromaddr, + string.join(self.toaddrs, ","), + self.getSubject(record), + self.date_time(), msg) + smtp.sendmail(self.fromaddr, self.toaddrs, msg) + smtp.quit() + except: + self.handleError(record) + +class NTEventLogHandler(logging.Handler): + """ + A handler class which sends events to the NT Event Log. Adds a + registry entry for the specified application name. If no dllname is + provided, win32service.pyd (which contains some basic message + placeholders) is used. Note that use of these placeholders will make + your event logs big, as the entire message source is held in the log. + If you want slimmer logs, you have to pass in the name of your own DLL + which contains the message definitions you want to use in the event log. + """ + def __init__(self, appname, dllname=None, logtype="Application"): + logging.Handler.__init__(self) + try: + import win32evtlogutil, win32evtlog + self.appname = appname + self._welu = win32evtlogutil + if not dllname: + dllname = os.path.split(self._welu.__file__) + dllname = os.path.split(dllname[0]) + dllname = os.path.join(dllname[0], r'win32service.pyd') + self.dllname = dllname + self.logtype = logtype + self._welu.AddSourceToRegistry(appname, dllname, logtype) + self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE + self.typemap = { + logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE, + logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE, + logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE, + logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE, + logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE, + } + except ImportError: + print "The Python Win32 extensions for NT (service, event "\ + "logging) appear not to be available." + self._welu = None + + def getMessageID(self, record): + """ + Return the message ID for the event record. If you are using your + own messages, you could do this by having the msg passed to the + logger being an ID rather than a formatting string. Then, in here, + you could use a dictionary lookup to get the message ID. This + version returns 1, which is the base message ID in win32service.pyd. + """ + return 1 + + def getEventCategory(self, record): + """ + Return the event category for the record. + + Override this if you want to specify your own categories. This version + returns 0. + """ + return 0 + + def getEventType(self, record): + """ + Return the event type for the record. + + Override this if you want to specify your own types. This version does + a mapping using the handler's typemap attribute, which is set up in + __init__() to a dictionary which contains mappings for DEBUG, INFO, + WARNING, ERROR and CRITICAL. If you are using your own levels you will + either need to override this method or place a suitable dictionary in + the handler's typemap attribute. + """ + return self.typemap.get(record.levelno, self.deftype) + + def emit(self, record): + """ + Emit a record. + + Determine the message ID, event category and event type. Then + log the message in the NT event log. + """ + if self._welu: + try: + id = self.getMessageID(record) + cat = self.getEventCategory(record) + type = self.getEventType(record) + msg = self.format(record) + self._welu.ReportEvent(self.appname, id, cat, type, [msg]) + except: + self.handleError(record) + + def close(self): + """ + Clean up this handler. 
+ + You can remove the application name from the registry as a + source of event log entries. However, if you do this, you will + not be able to see the events as you intended in the Event Log + Viewer - it needs to be able to access the registry to get the + DLL name. + """ + #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype) + pass + +class HTTPHandler(logging.Handler): + """ + A class which sends records to a Web server, using either GET or + POST semantics. + """ + def __init__(self, host, url, method="GET"): + """ + Initialize the instance with the host, the request URL, and the method + ("GET" or "POST") + """ + logging.Handler.__init__(self) + method = string.upper(method) + if method not in ["GET", "POST"]: + raise ValueError, "method must be GET or POST" + self.host = host + self.url = url + self.method = method + + def mapLogRecord(self, record): + """ + Default implementation of mapping the log record into a dict + that is send as the CGI data. Overwrite in your class. + Contributed by Franz Glasner. + """ + return record.__dict__ + + def emit(self, record): + """ + Emit a record. + + Send the record to the Web server as an URL-encoded dictionary + """ + try: + import httplib, urllib + h = httplib.HTTP(self.host) + url = self.url + data = urllib.urlencode(self.mapLogRecord(record)) + if self.method == "GET": + if (string.find(url, '?') >= 0): + sep = '&' + else: + sep = '?' + url = url + "%c%s" % (sep, data) + h.putrequest(self.method, url) + if self.method == "POST": + h.putheader("Content-length", str(len(data))) + h.endheaders() + if self.method == "POST": + h.send(data) + h.getreply() #can't do anything with the result + except: + self.handleError(record) + +class BufferingHandler(logging.Handler): + """ + A handler class which buffers logging records in memory. Whenever each + record is added to the buffer, a check is made to see if the buffer should + be flushed. If it should, then flush() is expected to do what's needed. + """ + def __init__(self, capacity): + """ + Initialize the handler with the buffer size. + """ + logging.Handler.__init__(self) + self.capacity = capacity + self.buffer = [] + + def shouldFlush(self, record): + """ + Should the handler flush its buffer? + + Returns true if the buffer is up to capacity. This method can be + overridden to implement custom flushing strategies. + """ + return (len(self.buffer) >= self.capacity) + + def emit(self, record): + """ + Emit a record. + + Append the record. If shouldFlush() tells us to, call flush() to process + the buffer. + """ + self.buffer.append(record) + if self.shouldFlush(record): + self.flush() + + def flush(self): + """ + Override to implement custom flushing behaviour. + + This version just zaps the buffer to empty. + """ + self.buffer = [] + +class MemoryHandler(BufferingHandler): + """ + A handler class which buffers logging records in memory, periodically + flushing them to a target handler. Flushing occurs whenever the buffer + is full, or when an event of a certain severity or greater is seen. + """ + def __init__(self, capacity, flushLevel=logging.ERROR, target=None): + """ + Initialize the handler with the buffer size, the level at which + flushing should occur and an optional target. + + Note that without a target being set either here or via setTarget(), + a MemoryHandler is no use to anyone! 
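MemoryHandler as a write-behind buffer in front of a slower target handler; a minimal sketch:

import logging
import logging.handlers

target = logging.FileHandler('app.log')
mem = logging.handlers.MemoryHandler(100, flushLevel=logging.ERROR, target=target)

log = logging.getLogger('app')
log.setLevel(logging.DEBUG)
log.addHandler(mem)
log.debug('held in the buffer')
log.error('at flushLevel: the buffered DEBUG record and this one reach app.log')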
+ """ + BufferingHandler.__init__(self, capacity) + self.flushLevel = flushLevel + self.target = target + + def shouldFlush(self, record): + """ + Check for buffer full or a record at the flushLevel or higher. + """ + return (len(self.buffer) >= self.capacity) or \ + (record.levelno >= self.flushLevel) + + def setTarget(self, target): + """ + Set the target handler for this handler. + """ + self.target = target + + def flush(self): + """ + For a MemoryHandler, flushing means just sending the buffered + records to the target, if there is one. Override if you want + different behaviour. + """ + if self.target: + for record in self.buffer: + self.target.handle(record) + self.buffer = [] + + def close(self): + """ + Flush, set the target to None and lose the buffer. + """ + self.flush() + self.target = None + self.buffer = [] diff --git a/DJAGEN/branches/oguz/djagen/gezegen/planet/feedparser.py b/DJAGEN/branches/oguz/djagen/gezegen/planet/feedparser.py new file mode 100755 index 0000000..615ee7e --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/gezegen/planet/feedparser.py @@ -0,0 +1,2931 @@ +#!/usr/bin/env python +"""Universal feed parser + +Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds + +Visit http://feedparser.org/ for the latest version +Visit http://feedparser.org/docs/ for the latest documentation + +Required: Python 2.1 or later +Recommended: Python 2.3 or later +Recommended: CJKCodecs and iconv_codec +""" + +__version__ = "4.1"# + "$Revision: 1.92 $"[11:15] + "-cvs" +__license__ = """Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS' +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE.""" +__author__ = "Mark Pilgrim " +__contributors__ = ["Jason Diamond ", + "John Beimler ", + "Fazal Majid ", + "Aaron Swartz ", + "Kevin Marks "] +_debug = 0 + +# HTTP "User-Agent" header to send to servers when downloading feeds. +# If you are embedding feedparser in a larger application, you should +# change this to your application name and URL. +USER_AGENT = "UniversalFeedParser/%s +http://feedparser.org/" % __version__ + +# HTTP "Accept" header to send to servers when downloading feeds. If you don't +# want to send an Accept header, set this to None. 
+ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
+
+# List of preferred XML parsers, by SAX driver name.  These will be tried first,
+# but if they're not installed, Python will keep searching through its own list
+# of pre-installed parsers until it finds one that supports everything we need.
+PREFERRED_XML_PARSERS = ["drv_libxml2"]
+
+# If you want feedparser to automatically run HTML markup through HTML Tidy, set
+# this to 1.  Requires mxTidy or utidylib.
+TIDY_MARKUP = 0
+
+# List of Python interfaces for HTML Tidy, in order of preference.  Only useful
+# if TIDY_MARKUP = 1
+PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"]
+
+# ---------- required modules (should come with any Python distribution) ----------
+import sgmllib, re, sys, copy, urlparse, time, rfc822, types, cgi, urllib, urllib2
+try:
+    from cStringIO import StringIO as _StringIO
+except:
+    from StringIO import StringIO as _StringIO
+
+# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------
+
+# gzip is included with most Python distributions, but may not be available if you compiled your own
+try:
+    import gzip
+except:
+    gzip = None
+try:
+    import zlib
+except:
+    zlib = None
+
+# If a real XML parser is available, feedparser will attempt to use it.  feedparser has
+# been tested with the built-in SAX parser, PyXML, and libxml2.  On platforms where the
+# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
+# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
+try:
+    import xml.sax
+    xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
+    from xml.sax.saxutils import escape as _xmlescape
+    _XML_AVAILABLE = 1
+except:
+    _XML_AVAILABLE = 0
+    def _xmlescape(data, entities={}):
+        # escape '&', '<' and '>' so the text survives as XML character data
+        data = data.replace('&', '&amp;')
+        data = data.replace('>', '&gt;')
+        data = data.replace('<', '&lt;')
+        for char, entity in entities:
+            data = data.replace(char, entity)
+        return data
+
+# base64 support for Atom feeds that contain embedded binary data
+try:
+    import base64, binascii
+except:
+    base64 = binascii = None
+
+# cjkcodecs and iconv_codec provide support for more character encodings.
+# Both are available from http://cjkpython.i18n.org/ +try: + import cjkcodecs.aliases +except: + pass +try: + import iconv_codec +except: + pass + +# chardet library auto-detects character encodings +# Download from http://chardet.feedparser.org/ +try: + import chardet + if _debug: + import chardet.constants + chardet.constants._debug = 1 +except: + chardet = None + +# ---------- don't touch these ---------- +class ThingsNobodyCaresAboutButMe(Exception): pass +class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass +class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass +class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass +class UndeclaredNamespace(Exception): pass + +sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*') +sgmllib.special = re.compile('' % (tag, self.strattrs(attrs)), escape=0) + + # match namespaces + if tag.find(':') <> -1: + prefix, suffix = tag.split(':', 1) + else: + prefix, suffix = '', tag + prefix = self.namespacemap.get(prefix, prefix) + if prefix: + prefix = prefix + '_' + + # special hack for better tracking of empty textinput/image elements in illformed feeds + if (not prefix) and tag not in ('title', 'link', 'description', 'name'): + self.intextinput = 0 + if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'): + self.inimage = 0 + + # call special handler (if defined) or default handler + methodname = '_start_' + prefix + suffix + try: + method = getattr(self, methodname) + return method(attrsD) + except AttributeError: + return self.push(prefix + suffix, 1) + + def unknown_endtag(self, tag): + if _debug: sys.stderr.write('end %s\n' % tag) + # match namespaces + if tag.find(':') <> -1: + prefix, suffix = tag.split(':', 1) + else: + prefix, suffix = '', tag + prefix = self.namespacemap.get(prefix, prefix) + if prefix: + prefix = prefix + '_' + + # call special handler (if defined) or default handler + methodname = '_end_' + prefix + suffix + try: + method = getattr(self, methodname) + method() + except AttributeError: + self.pop(prefix + suffix) + + # track inline content + if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'): + # element declared itself as escaped markup, but it isn't really + self.contentparams['type'] = 'application/xhtml+xml' + if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml': + tag = tag.split(':')[-1] + self.handle_data('' % tag, escape=0) + + # track xml:base and xml:lang going out of scope + if self.basestack: + self.basestack.pop() + if self.basestack and self.basestack[-1]: + self.baseuri = self.basestack[-1] + if self.langstack: + self.langstack.pop() + if self.langstack: # and (self.langstack[-1] is not None): + self.lang = self.langstack[-1] + + def handle_charref(self, ref): + # called for each character reference, e.g. for ' ', ref will be '160' + if not self.elementstack: return + ref = ref.lower() + if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'): + text = '&#%s;' % ref + else: + if ref[0] == 'x': + c = int(ref[1:], 16) + else: + c = int(ref) + text = unichr(c).encode('utf-8') + self.elementstack[-1][2].append(text) + + def handle_entityref(self, ref): + # called for each entity reference, e.g. 
for '©', ref will be 'copy' + if not self.elementstack: return + if _debug: sys.stderr.write('entering handle_entityref with %s\n' % ref) + if ref in ('lt', 'gt', 'quot', 'amp', 'apos'): + text = '&%s;' % ref + else: + # entity resolution graciously donated by Aaron Swartz + def name2cp(k): + import htmlentitydefs + if hasattr(htmlentitydefs, 'name2codepoint'): # requires Python 2.3 + return htmlentitydefs.name2codepoint[k] + k = htmlentitydefs.entitydefs[k] + if k.startswith('&#') and k.endswith(';'): + return int(k[2:-1]) # not in latin-1 + return ord(k) + try: name2cp(ref) + except KeyError: text = '&%s;' % ref + else: text = unichr(name2cp(ref)).encode('utf-8') + self.elementstack[-1][2].append(text) + + def handle_data(self, text, escape=1): + # called for each block of plain text, i.e. outside of any tag and + # not containing any character or entity references + if not self.elementstack: return + if escape and self.contentparams.get('type') == 'application/xhtml+xml': + text = _xmlescape(text) + self.elementstack[-1][2].append(text) + + def handle_comment(self, text): + # called for each comment, e.g. + pass + + def handle_pi(self, text): + # called for each processing instruction, e.g. + pass + + def handle_decl(self, text): + pass + + def parse_declaration(self, i): + # override internal declaration handler to handle CDATA blocks + if _debug: sys.stderr.write('entering parse_declaration\n') + if self.rawdata[i:i+9] == '', i) + if k == -1: k = len(self.rawdata) + self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0) + return k+3 + else: + k = self.rawdata.find('>', i) + return k+1 + + def mapContentType(self, contentType): + contentType = contentType.lower() + if contentType == 'text': + contentType = 'text/plain' + elif contentType == 'html': + contentType = 'text/html' + elif contentType == 'xhtml': + contentType = 'application/xhtml+xml' + return contentType + + def trackNamespace(self, prefix, uri): + loweruri = uri.lower() + if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/') and not self.version: + self.version = 'rss090' + if loweruri == 'http://purl.org/rss/1.0/' and not self.version: + self.version = 'rss10' + if loweruri == 'http://www.w3.org/2005/atom' and not self.version: + self.version = 'atom10' + if loweruri.find('backend.userland.com/rss') <> -1: + # match any backend.userland.com namespace + uri = 'http://backend.userland.com/rss' + loweruri = uri + if self._matchnamespaces.has_key(loweruri): + self.namespacemap[prefix] = self._matchnamespaces[loweruri] + self.namespacesInUse[self._matchnamespaces[loweruri]] = uri + else: + self.namespacesInUse[prefix or ''] = uri + + def resolveURI(self, uri): + return _urljoin(self.baseuri or '', uri) + + def decodeEntities(self, element, data): + return data + + def strattrs(self, attrs): + return ''.join([' %s="%s"' % (t[0],_xmlescape(t[1],{'"':'"'})) for t in attrs]) + + def push(self, element, expectingText): + self.elementstack.append([element, expectingText, []]) + + def pop(self, element, stripWhitespace=1): + if not self.elementstack: return + if self.elementstack[-1][0] != element: return + + element, expectingText, pieces = self.elementstack.pop() + + if self.version == 'atom10' and self.contentparams.get('type','text') == 'application/xhtml+xml': + # remove enclosing child element, but only if it is a
<div> and
+            # only if all the remaining content is nested underneath it.
+            # This means that the divs would be retained in the following:
+            #    <div>foo</div><div>bar</div>
+            if pieces and (pieces[0] == '<div>' or pieces[0].startswith('<div ')) and pieces[-1]=='</div>':
+                depth = 0
+                for piece in pieces[:-1]:
+                    if piece.startswith('</'):
+                        depth -= 1
+                        if depth == 0: break
+                    elif piece.startswith('<') and not piece.endswith('/>'):
+                        depth += 1
+                else:
+                    pieces = pieces[1:-1]
+
+        output = ''.join(pieces)
+        if stripWhitespace:
+            output = output.strip()
+        if not expectingText: return output
+
+        # decode base64 content
+        if base64 and self.contentparams.get('base64', 0):
+            try:
+                output = base64.decodestring(output)
+            except binascii.Error:
+                pass
+            except binascii.Incomplete:
+                pass
+
+        # resolve relative URIs
+        if (element in self.can_be_relative_uri) and output:
+            output = self.resolveURI(output)
+
+        # decode entities within embedded markup
+        if not self.contentparams.get('base64', 0):
+            output = self.decodeEntities(element, output)
+
+        # remove temporary cruft from contentparams
+        try:
+            del self.contentparams['mode']
+        except KeyError:
+            pass
+        try:
+            del self.contentparams['base64']
+        except KeyError:
+            pass
+
+        # resolve relative URIs within embedded markup
+        if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types:
+            if element in self.can_contain_relative_uris:
+                output = _resolveRelativeURIs(output, self.baseuri, self.encoding)
+
+        # sanitize embedded markup
+        if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types:
+            if element in self.can_contain_dangerous_markup:
+                output = _sanitizeHTML(output, self.encoding)
+
+        if self.encoding and type(output) != type(u''):
+            try:
+                output = unicode(output, self.encoding)
+            except:
+                pass
+
+        # address common error where people take data that is already
+        # utf-8, presume that it is iso-8859-1, and re-encode it.
+        if self.encoding=='utf-8' and type(output) == type(u''):
+            try:
+                output = unicode(output.encode('iso-8859-1'), 'utf-8')
+            except:
+                pass
+
+        # map win-1252 extensions to the proper code points
+        if type(output) == type(u''):
+            output = u''.join([c in cp1252 and cp1252[c] or c for c in output])
+
+        # categories/tags/keywords/whatever are handled in _end_category
+        if element == 'category':
+            return output
+
+        # store output in appropriate place(s)
+        if self.inentry and not self.insource:
+            if element == 'content':
+                self.entries[-1].setdefault(element, [])
+                contentparams = copy.deepcopy(self.contentparams)
+                contentparams['value'] = output
+                self.entries[-1][element].append(contentparams)
+            elif element == 'link':
+                self.entries[-1][element] = output
+                if output:
+                    self.entries[-1]['links'][-1]['href'] = output
+            else:
+                if element == 'description':
+                    element = 'summary'
+                self.entries[-1][element] = output
+                if self.incontent:
+                    contentparams = copy.deepcopy(self.contentparams)
+                    contentparams['value'] = output
+                    self.entries[-1][element + '_detail'] = contentparams
+        elif (self.infeed or self.insource) and (not self.intextinput) and (not self.inimage):
+            context = self._getContext()
+            if element == 'description':
+                element = 'subtitle'
+            context[element] = output
+            if element == 'link':
+                context['links'][-1]['href'] = output
+            elif self.incontent:
+                contentparams = copy.deepcopy(self.contentparams)
+                contentparams['value'] = output
+                context[element + '_detail'] = contentparams
+        return output
+
+    def pushContent(self, tag, attrsD, defaultContentType, expectingText):
+        self.incontent += 1
+        self.contentparams = FeedParserDict({
+            'type': self.mapContentType(attrsD.get('type', defaultContentType)),
+            'language': self.lang,
+            'base': self.baseuri})
+        self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
+        self.push(tag, expectingText)
+
+    def popContent(self, tag):
+        value = self.pop(tag)
+        self.incontent -= 1
+ self.contentparams.clear() + return value + + def _mapToStandardPrefix(self, name): + colonpos = name.find(':') + if colonpos <> -1: + prefix = name[:colonpos] + suffix = name[colonpos+1:] + prefix = self.namespacemap.get(prefix, prefix) + name = prefix + ':' + suffix + return name + + def _getAttribute(self, attrsD, name): + return attrsD.get(self._mapToStandardPrefix(name)) + + def _isBase64(self, attrsD, contentparams): + if attrsD.get('mode', '') == 'base64': + return 1 + if self.contentparams['type'].startswith('text/'): + return 0 + if self.contentparams['type'].endswith('+xml'): + return 0 + if self.contentparams['type'].endswith('/xml'): + return 0 + return 1 + + def _itsAnHrefDamnIt(self, attrsD): + href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None))) + if href: + try: + del attrsD['url'] + except KeyError: + pass + try: + del attrsD['uri'] + except KeyError: + pass + attrsD['href'] = href + return attrsD + + def _save(self, key, value): + context = self._getContext() + context.setdefault(key, value) + + def _start_rss(self, attrsD): + versionmap = {'0.91': 'rss091u', + '0.92': 'rss092', + '0.93': 'rss093', + '0.94': 'rss094'} + if not self.version: + attr_version = attrsD.get('version', '') + version = versionmap.get(attr_version) + if version: + self.version = version + elif attr_version.startswith('2.'): + self.version = 'rss20' + else: + self.version = 'rss' + + def _start_dlhottitles(self, attrsD): + self.version = 'hotrss' + + def _start_channel(self, attrsD): + self.infeed = 1 + self._cdf_common(attrsD) + _start_feedinfo = _start_channel + + def _cdf_common(self, attrsD): + if attrsD.has_key('lastmod'): + self._start_modified({}) + self.elementstack[-1][-1] = attrsD['lastmod'] + self._end_modified() + if attrsD.has_key('href'): + self._start_link({}) + self.elementstack[-1][-1] = attrsD['href'] + self._end_link() + + def _start_feed(self, attrsD): + self.infeed = 1 + versionmap = {'0.1': 'atom01', + '0.2': 'atom02', + '0.3': 'atom03'} + if not self.version: + attr_version = attrsD.get('version') + version = versionmap.get(attr_version) + if version: + self.version = version + else: + self.version = 'atom' + + def _end_channel(self): + self.infeed = 0 + _end_feed = _end_channel + + def _start_image(self, attrsD): + self.inimage = 1 + self.push('image', 0) + context = self._getContext() + context.setdefault('image', FeedParserDict()) + + def _end_image(self): + self.pop('image') + self.inimage = 0 + + def _start_textinput(self, attrsD): + self.intextinput = 1 + self.push('textinput', 0) + context = self._getContext() + context.setdefault('textinput', FeedParserDict()) + _start_textInput = _start_textinput + + def _end_textinput(self): + self.pop('textinput') + self.intextinput = 0 + _end_textInput = _end_textinput + + def _start_author(self, attrsD): + self.inauthor = 1 + self.push('author', 1) + _start_managingeditor = _start_author + _start_dc_author = _start_author + _start_dc_creator = _start_author + _start_itunes_author = _start_author + + def _end_author(self): + self.pop('author') + self.inauthor = 0 + self._sync_author_detail() + _end_managingeditor = _end_author + _end_dc_author = _end_author + _end_dc_creator = _end_author + _end_itunes_author = _end_author + + def _start_itunes_owner(self, attrsD): + self.inpublisher = 1 + self.push('publisher', 0) + + def _end_itunes_owner(self): + self.pop('publisher') + self.inpublisher = 0 + self._sync_author_detail('publisher') + + def _start_contributor(self, attrsD): + self.incontributor = 1 + context = 
self._getContext() + context.setdefault('contributors', []) + context['contributors'].append(FeedParserDict()) + self.push('contributor', 0) + + def _end_contributor(self): + self.pop('contributor') + self.incontributor = 0 + + def _start_dc_contributor(self, attrsD): + self.incontributor = 1 + context = self._getContext() + context.setdefault('contributors', []) + context['contributors'].append(FeedParserDict()) + self.push('name', 0) + + def _end_dc_contributor(self): + self._end_name() + self.incontributor = 0 + + def _start_name(self, attrsD): + self.push('name', 0) + _start_itunes_name = _start_name + + def _end_name(self): + value = self.pop('name') + if self.inpublisher: + self._save_author('name', value, 'publisher') + elif self.inauthor: + self._save_author('name', value) + elif self.incontributor: + self._save_contributor('name', value) + elif self.intextinput: + context = self._getContext() + context['textinput']['name'] = value + _end_itunes_name = _end_name + + def _start_width(self, attrsD): + self.push('width', 0) + + def _end_width(self): + value = self.pop('width') + try: + value = int(value) + except: + value = 0 + if self.inimage: + context = self._getContext() + context['image']['width'] = value + + def _start_height(self, attrsD): + self.push('height', 0) + + def _end_height(self): + value = self.pop('height') + try: + value = int(value) + except: + value = 0 + if self.inimage: + context = self._getContext() + context['image']['height'] = value + + def _start_url(self, attrsD): + self.push('href', 1) + _start_homepage = _start_url + _start_uri = _start_url + + def _end_url(self): + value = self.pop('href') + if self.inauthor: + self._save_author('href', value) + elif self.incontributor: + self._save_contributor('href', value) + elif self.inimage: + context = self._getContext() + context['image']['href'] = value + elif self.intextinput: + context = self._getContext() + context['textinput']['link'] = value + _end_homepage = _end_url + _end_uri = _end_url + + def _start_email(self, attrsD): + self.push('email', 0) + _start_itunes_email = _start_email + + def _end_email(self): + value = self.pop('email') + if self.inpublisher: + self._save_author('email', value, 'publisher') + elif self.inauthor: + self._save_author('email', value) + elif self.incontributor: + self._save_contributor('email', value) + _end_itunes_email = _end_email + + def _getContext(self): + if self.insource: + context = self.sourcedata + elif self.inentry: + context = self.entries[-1] + else: + context = self.feeddata + return context + + def _save_author(self, key, value, prefix='author'): + context = self._getContext() + context.setdefault(prefix + '_detail', FeedParserDict()) + context[prefix + '_detail'][key] = value + self._sync_author_detail() + + def _save_contributor(self, key, value): + context = self._getContext() + context.setdefault('contributors', [FeedParserDict()]) + context['contributors'][-1][key] = value + + def _sync_author_detail(self, key='author'): + context = self._getContext() + detail = context.get('%s_detail' % key) + if detail: + name = detail.get('name') + email = detail.get('email') + if name and email: + context[key] = '%s (%s)' % (name, email) + elif name: + context[key] = name + elif email: + context[key] = email + else: + author = context.get(key) + if not author: return + emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))''', author) + if not emailmatch: return + email = 
emailmatch.group(0) + # probably a better way to do the following, but it passes all the tests + author = author.replace(email, '') + author = author.replace('()', '') + author = author.strip() + if author and (author[0] == '('): + author = author[1:] + if author and (author[-1] == ')'): + author = author[:-1] + author = author.strip() + context.setdefault('%s_detail' % key, FeedParserDict()) + context['%s_detail' % key]['name'] = author + context['%s_detail' % key]['email'] = email + + def _start_subtitle(self, attrsD): + self.pushContent('subtitle', attrsD, 'text/plain', 1) + _start_tagline = _start_subtitle + _start_itunes_subtitle = _start_subtitle + + def _end_subtitle(self): + self.popContent('subtitle') + _end_tagline = _end_subtitle + _end_itunes_subtitle = _end_subtitle + + def _start_rights(self, attrsD): + self.pushContent('rights', attrsD, 'text/plain', 1) + _start_dc_rights = _start_rights + _start_copyright = _start_rights + + def _end_rights(self): + self.popContent('rights') + _end_dc_rights = _end_rights + _end_copyright = _end_rights + + def _start_item(self, attrsD): + self.entries.append(FeedParserDict()) + self.push('item', 0) + self.inentry = 1 + self.guidislink = 0 + id = self._getAttribute(attrsD, 'rdf:about') + if id: + context = self._getContext() + context['id'] = id + self._cdf_common(attrsD) + _start_entry = _start_item + _start_product = _start_item + + def _end_item(self): + self.pop('item') + self.inentry = 0 + _end_entry = _end_item + + def _start_dc_language(self, attrsD): + self.push('language', 1) + _start_language = _start_dc_language + + def _end_dc_language(self): + self.lang = self.pop('language') + _end_language = _end_dc_language + + def _start_dc_publisher(self, attrsD): + self.push('publisher', 1) + _start_webmaster = _start_dc_publisher + + def _end_dc_publisher(self): + self.pop('publisher') + self._sync_author_detail('publisher') + _end_webmaster = _end_dc_publisher + + def _start_published(self, attrsD): + self.push('published', 1) + _start_dcterms_issued = _start_published + _start_issued = _start_published + + def _end_published(self): + value = self.pop('published') + self._save('published_parsed', _parse_date(value)) + _end_dcterms_issued = _end_published + _end_issued = _end_published + + def _start_updated(self, attrsD): + self.push('updated', 1) + _start_modified = _start_updated + _start_dcterms_modified = _start_updated + _start_pubdate = _start_updated + _start_dc_date = _start_updated + + def _end_updated(self): + value = self.pop('updated') + parsed_value = _parse_date(value) + self._save('updated_parsed', parsed_value) + _end_modified = _end_updated + _end_dcterms_modified = _end_updated + _end_pubdate = _end_updated + _end_dc_date = _end_updated + + def _start_created(self, attrsD): + self.push('created', 1) + _start_dcterms_created = _start_created + + def _end_created(self): + value = self.pop('created') + self._save('created_parsed', _parse_date(value)) + _end_dcterms_created = _end_created + + def _start_expirationdate(self, attrsD): + self.push('expired', 1) + + def _end_expirationdate(self): + self._save('expired_parsed', _parse_date(self.pop('expired'))) + + def _start_cc_license(self, attrsD): + self.push('license', 1) + value = self._getAttribute(attrsD, 'rdf:resource') + if value: + self.elementstack[-1][2].append(value) + self.pop('license') + + def _start_creativecommons_license(self, attrsD): + self.push('license', 1) + + def _end_creativecommons_license(self): + self.pop('license') + + def _addTag(self, term, 
scheme, label): + context = self._getContext() + tags = context.setdefault('tags', []) + if (not term) and (not scheme) and (not label): return + value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label}) + if value not in tags: + tags.append(FeedParserDict({'term': term, 'scheme': scheme, 'label': label})) + + def _start_category(self, attrsD): + if _debug: sys.stderr.write('entering _start_category with %s\n' % repr(attrsD)) + term = attrsD.get('term') + scheme = attrsD.get('scheme', attrsD.get('domain')) + label = attrsD.get('label') + self._addTag(term, scheme, label) + self.push('category', 1) + _start_dc_subject = _start_category + _start_keywords = _start_category + + def _end_itunes_keywords(self): + for term in self.pop('itunes_keywords').split(): + self._addTag(term, 'http://www.itunes.com/', None) + + def _start_itunes_category(self, attrsD): + self._addTag(attrsD.get('text'), 'http://www.itunes.com/', None) + self.push('category', 1) + + def _end_category(self): + value = self.pop('category') + if not value: return + context = self._getContext() + tags = context['tags'] + if value and len(tags) and not tags[-1]['term']: + tags[-1]['term'] = value + else: + self._addTag(value, None, None) + _end_dc_subject = _end_category + _end_keywords = _end_category + _end_itunes_category = _end_category + + def _start_cloud(self, attrsD): + self._getContext()['cloud'] = FeedParserDict(attrsD) + + def _start_link(self, attrsD): + attrsD.setdefault('rel', 'alternate') + attrsD.setdefault('type', 'text/html') + attrsD = self._itsAnHrefDamnIt(attrsD) + if attrsD.has_key('href'): + attrsD['href'] = self.resolveURI(attrsD['href']) + expectingText = self.infeed or self.inentry or self.insource + context = self._getContext() + context.setdefault('links', []) + context['links'].append(FeedParserDict(attrsD)) + if attrsD['rel'] == 'enclosure': + self._start_enclosure(attrsD) + if attrsD.has_key('href'): + expectingText = 0 + if (attrsD.get('rel') == 'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types): + context['link'] = attrsD['href'] + else: + self.push('link', expectingText) + _start_producturl = _start_link + + def _end_link(self): + value = self.pop('link') + context = self._getContext() + if self.intextinput: + context['textinput']['link'] = value + if self.inimage: + context['image']['link'] = value + _end_producturl = _end_link + + def _start_guid(self, attrsD): + self.guidislink = (attrsD.get('ispermalink', 'true') == 'true') + self.push('id', 1) + + def _end_guid(self): + value = self.pop('id') + self._save('guidislink', self.guidislink and not self._getContext().has_key('link')) + if self.guidislink: + # guid acts as link, but only if 'ispermalink' is not present or is 'true', + # and only if the item doesn't already have a link element + self._save('link', value) + + def _start_title(self, attrsD): + self.pushContent('title', attrsD, 'text/plain', self.infeed or self.inentry or self.insource) + _start_dc_title = _start_title + _start_media_title = _start_title + + def _end_title(self): + value = self.popContent('title') + context = self._getContext() + if self.intextinput: + context['textinput']['title'] = value + elif self.inimage: + context['image']['title'] = value + _end_dc_title = _end_title + _end_media_title = _end_title + + def _start_description(self, attrsD): + context = self._getContext() + if context.has_key('summary'): + self._summaryKey = 'content' + self._start_content(attrsD) + else: + self.pushContent('description', attrsD, 
'text/html', self.infeed or self.inentry or self.insource) + + def _start_abstract(self, attrsD): + self.pushContent('description', attrsD, 'text/plain', self.infeed or self.inentry or self.insource) + + def _end_description(self): + if self._summaryKey == 'content': + self._end_content() + else: + value = self.popContent('description') + context = self._getContext() + if self.intextinput: + context['textinput']['description'] = value + elif self.inimage: + context['image']['description'] = value + self._summaryKey = None + _end_abstract = _end_description + + def _start_info(self, attrsD): + self.pushContent('info', attrsD, 'text/plain', 1) + _start_feedburner_browserfriendly = _start_info + + def _end_info(self): + self.popContent('info') + _end_feedburner_browserfriendly = _end_info + + def _start_generator(self, attrsD): + if attrsD: + attrsD = self._itsAnHrefDamnIt(attrsD) + if attrsD.has_key('href'): + attrsD['href'] = self.resolveURI(attrsD['href']) + self._getContext()['generator_detail'] = FeedParserDict(attrsD) + self.push('generator', 1) + + def _end_generator(self): + value = self.pop('generator') + context = self._getContext() + if context.has_key('generator_detail'): + context['generator_detail']['name'] = value + + def _start_admin_generatoragent(self, attrsD): + self.push('generator', 1) + value = self._getAttribute(attrsD, 'rdf:resource') + if value: + self.elementstack[-1][2].append(value) + self.pop('generator') + self._getContext()['generator_detail'] = FeedParserDict({'href': value}) + + def _start_admin_errorreportsto(self, attrsD): + self.push('errorreportsto', 1) + value = self._getAttribute(attrsD, 'rdf:resource') + if value: + self.elementstack[-1][2].append(value) + self.pop('errorreportsto') + + def _start_summary(self, attrsD): + context = self._getContext() + if context.has_key('summary'): + self._summaryKey = 'content' + self._start_content(attrsD) + else: + self._summaryKey = 'summary' + self.pushContent(self._summaryKey, attrsD, 'text/plain', 1) + _start_itunes_summary = _start_summary + + def _end_summary(self): + if self._summaryKey == 'content': + self._end_content() + else: + self.popContent(self._summaryKey or 'summary') + self._summaryKey = None + _end_itunes_summary = _end_summary + + def _start_enclosure(self, attrsD): + attrsD = self._itsAnHrefDamnIt(attrsD) + self._getContext().setdefault('enclosures', []).append(FeedParserDict(attrsD)) + href = attrsD.get('href') + if href: + context = self._getContext() + if not context.get('id'): + context['id'] = href + + def _start_source(self, attrsD): + self.insource = 1 + + def _end_source(self): + self.insource = 0 + self._getContext()['source'] = copy.deepcopy(self.sourcedata) + self.sourcedata.clear() + + def _start_content(self, attrsD): + self.pushContent('content', attrsD, 'text/plain', 1) + src = attrsD.get('src') + if src: + self.contentparams['src'] = src + self.push('content', 1) + + def _start_prodlink(self, attrsD): + self.pushContent('content', attrsD, 'text/html', 1) + + def _start_body(self, attrsD): + self.pushContent('content', attrsD, 'application/xhtml+xml', 1) + _start_xhtml_body = _start_body + + def _start_content_encoded(self, attrsD): + self.pushContent('content', attrsD, 'text/html', 1) + _start_fullitem = _start_content_encoded + + def _end_content(self): + copyToDescription = self.mapContentType(self.contentparams.get('type')) in (['text/plain'] + self.html_types) + value = self.popContent('content') + if copyToDescription: + self._save('description', value) + _end_body = 
_end_content + _end_xhtml_body = _end_content + _end_content_encoded = _end_content + _end_fullitem = _end_content + _end_prodlink = _end_content + + def _start_itunes_image(self, attrsD): + self.push('itunes_image', 0) + self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')}) + _start_itunes_link = _start_itunes_image + + def _end_itunes_block(self): + value = self.pop('itunes_block', 0) + self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0 + + def _end_itunes_explicit(self): + value = self.pop('itunes_explicit', 0) + self._getContext()['itunes_explicit'] = (value == 'yes') and 1 or 0 + +if _XML_AVAILABLE: + class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler): + def __init__(self, baseuri, baselang, encoding): + if _debug: sys.stderr.write('trying StrictFeedParser\n') + xml.sax.handler.ContentHandler.__init__(self) + _FeedParserMixin.__init__(self, baseuri, baselang, encoding) + self.bozo = 0 + self.exc = None + + def startPrefixMapping(self, prefix, uri): + self.trackNamespace(prefix, uri) + + def startElementNS(self, name, qname, attrs): + namespace, localname = name + lowernamespace = str(namespace or '').lower() + if lowernamespace.find('backend.userland.com/rss') <> -1: + # match any backend.userland.com namespace + namespace = 'http://backend.userland.com/rss' + lowernamespace = namespace + if qname and qname.find(':') > 0: + givenprefix = qname.split(':')[0] + else: + givenprefix = None + prefix = self._matchnamespaces.get(lowernamespace, givenprefix) + if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and not self.namespacesInUse.has_key(givenprefix): + raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix + if prefix: + localname = prefix + ':' + localname + localname = str(localname).lower() + if _debug: sys.stderr.write('startElementNS: qname = %s, namespace = %s, givenprefix = %s, prefix = %s, attrs = %s, localname = %s\n' % (qname, namespace, givenprefix, prefix, attrs.items(), localname)) + + # qname implementation is horribly broken in Python 2.1 (it + # doesn't report any), and slightly broken in Python 2.2 (it + # doesn't report the xml: namespace). So we match up namespaces + # with a known list first, and then possibly override them with + # the qnames the SAX parser gives us (if indeed it gives us any + # at all). Thanks to MatejC for helping me test this and + # tirelessly telling me that it didn't work yet. 
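+        # An illustrative aside, not part of the original source: the same
+        # known-list normalization in isolation, using one hypothetical
+        # namespace URI. The attribute loop below applies the real version
+        # of this to every attribute. Disabled so it never runs:
+        if 0:
+            _known = {'http://purl.org/dc/elements/1.1/': 'dc'}
+            def _prefixed(ns, localname):
+                p = _known.get((ns or '').lower(), '')
+                return p and (p + ':' + localname.lower()) or localname.lower()
+            assert _prefixed('http://purl.org/dc/elements/1.1/', 'creator') == 'dc:creator'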
+ attrsD = {} + for (namespace, attrlocalname), attrvalue in attrs._attrs.items(): + lowernamespace = (namespace or '').lower() + prefix = self._matchnamespaces.get(lowernamespace, '') + if prefix: + attrlocalname = prefix + ':' + attrlocalname + attrsD[str(attrlocalname).lower()] = attrvalue + for qname in attrs.getQNames(): + attrsD[str(qname).lower()] = attrs.getValueByQName(qname) + self.unknown_starttag(localname, attrsD.items()) + + def characters(self, text): + self.handle_data(text) + + def endElementNS(self, name, qname): + namespace, localname = name + lowernamespace = str(namespace or '').lower() + if qname and qname.find(':') > 0: + givenprefix = qname.split(':')[0] + else: + givenprefix = '' + prefix = self._matchnamespaces.get(lowernamespace, givenprefix) + if prefix: + localname = prefix + ':' + localname + localname = str(localname).lower() + self.unknown_endtag(localname) + + def error(self, exc): + self.bozo = 1 + self.exc = exc + + def fatalError(self, exc): + self.error(exc) + raise exc + +class _BaseHTMLProcessor(sgmllib.SGMLParser): + elements_no_end_tag = ['area', 'base', 'basefont', 'br', 'col', 'frame', 'hr', + 'img', 'input', 'isindex', 'link', 'meta', 'param'] + + def __init__(self, encoding): + self.encoding = encoding + if _debug: sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding) + sgmllib.SGMLParser.__init__(self) + + def reset(self): + self.pieces = [] + sgmllib.SGMLParser.reset(self) + + def _shorttag_replace(self, match): + tag = match.group(1) + if tag in self.elements_no_end_tag: + return '<' + tag + ' />' + else: + return '<' + tag + '>' + + def feed(self, data): + data = re.compile(r'', self._shorttag_replace, data) # bug [ 1399464 ] Bad regexp for _shorttag_replace + data = re.sub(r'<([^<\s]+?)\s*/>', self._shorttag_replace, data) + data = data.replace(''', "'") + data = data.replace('"', '"') + if self.encoding and type(data) == type(u''): + data = data.encode(self.encoding) + sgmllib.SGMLParser.feed(self, data) + sgmllib.SGMLParser.close(self) + + def normalize_attrs(self, attrs): + # utility method to be called by descendants + attrs = [(k.lower(), v) for k, v in attrs] + attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs] + return attrs + + def unknown_starttag(self, tag, attrs): + # called for each start tag + # attrs is a list of (attr, value) tuples + # e.g. for
<pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
+        if _debug: sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag)
+        uattrs = []
+        # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
+        for key, value in attrs:
+            if type(value) != type(u''):
+                value = unicode(value, self.encoding)
+            uattrs.append((unicode(key, self.encoding), value))
+        strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs]).encode(self.encoding)
+        if tag in self.elements_no_end_tag:
+            self.pieces.append('<%(tag)s%(strattrs)s />' % locals())
+        else:
+            self.pieces.append('<%(tag)s%(strattrs)s>' % locals())
+
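+    # An illustrative aside, not part of the original source: because every
+    # handler in this class appends reconstructed markup to self.pieces, a
+    # bare _BaseHTMLProcessor round-trips HTML almost verbatim (elements in
+    # elements_no_end_tag come back as '<br />'). Hypothetical input,
+    # disabled so it never runs:
+    if 0:
+        _p = _BaseHTMLProcessor('utf-8')
+        _p.feed('<p class="intro">Hello &amp; welcome<br></p>')
+        assert _p.output() == '<p class="intro">Hello &amp; welcome<br /></p>'
+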
+    def unknown_endtag(self, tag):
+        # called for each end tag, e.g. for </pre>
, tag will be 'pre' + # Reconstruct the original end tag. + if tag not in self.elements_no_end_tag: + self.pieces.append("" % locals()) + + def handle_charref(self, ref): + # called for each character reference, e.g. for ' ', ref will be '160' + # Reconstruct the original character reference. + self.pieces.append('&#%(ref)s;' % locals()) + + def handle_entityref(self, ref): + # called for each entity reference, e.g. for '©', ref will be 'copy' + # Reconstruct the original entity reference. + import htmlentitydefs + if not hasattr(htmlentitydefs, 'name2codepoint') or htmlentitydefs.name2codepoint.has_key(ref): + self.pieces.append('&%(ref)s;' % locals()) + else: + self.pieces.append('&%(ref)s' % locals()) + + def handle_data(self, text): + # called for each block of plain text, i.e. outside of any tag and + # not containing any character or entity references + # Store the original text verbatim. + if _debug: sys.stderr.write('_BaseHTMLProcessor, handle_text, text=%s\n' % text) + self.pieces.append(text) + + def handle_comment(self, text): + # called for each HTML comment, e.g. + # Reconstruct the original comment. + self.pieces.append('' % locals()) + + def handle_pi(self, text): + # called for each processing instruction, e.g. + # Reconstruct original processing instruction. + self.pieces.append('' % locals()) + + def handle_decl(self, text): + # called for the DOCTYPE, if present, e.g. + # + # Reconstruct original DOCTYPE + self.pieces.append('' % locals()) + + _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match + def _scan_name(self, i, declstartpos): + rawdata = self.rawdata + n = len(rawdata) + if i == n: + return None, -1 + m = self._new_declname_match(rawdata, i) + if m: + s = m.group() + name = s.strip() + if (i + len(s)) == n: + return None, -1 # end of buffer + return name.lower(), m.end() + else: + self.handle_data(rawdata) +# self.updatepos(declstartpos, i) + return None, -1 + + def output(self): + '''Return processed HTML as a single string''' + return ''.join([str(p) for p in self.pieces]) + +class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor): + def __init__(self, baseuri, baselang, encoding): + sgmllib.SGMLParser.__init__(self) + _FeedParserMixin.__init__(self, baseuri, baselang, encoding) + + def decodeEntities(self, element, data): + data = data.replace('<', '<') + data = data.replace('<', '<') + data = data.replace('<', '<') + data = data.replace('>', '>') + data = data.replace('>', '>') + data = data.replace('>', '>') + data = data.replace('&', '&') + data = data.replace('&', '&') + data = data.replace('"', '"') + data = data.replace('"', '"') + data = data.replace(''', ''') + data = data.replace(''', ''') + if self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'): + data = data.replace('<', '<') + data = data.replace('>', '>') + data = data.replace('&', '&') + data = data.replace('"', '"') + data = data.replace(''', "'") + return data + + def strattrs(self, attrs): + return ''.join([' %s="%s"' % t for t in attrs]) + +class _RelativeURIResolver(_BaseHTMLProcessor): + relative_uris = [('a', 'href'), + ('applet', 'codebase'), + ('area', 'href'), + ('blockquote', 'cite'), + ('body', 'background'), + ('del', 'cite'), + ('form', 'action'), + ('frame', 'longdesc'), + ('frame', 'src'), + ('iframe', 'longdesc'), + ('iframe', 'src'), + ('head', 'profile'), + ('img', 'longdesc'), + ('img', 'src'), + ('img', 'usemap'), + ('input', 'src'), + ('input', 'usemap'), + ('ins', 'cite'), + ('link', 'href'), + ('object', 
'classid'), + ('object', 'codebase'), + ('object', 'data'), + ('object', 'usemap'), + ('q', 'cite'), + ('script', 'src')] + + def __init__(self, baseuri, encoding): + _BaseHTMLProcessor.__init__(self, encoding) + self.baseuri = baseuri + + def resolveURI(self, uri): + return _urljoin(self.baseuri, uri) + + def unknown_starttag(self, tag, attrs): + attrs = self.normalize_attrs(attrs) + attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs] + _BaseHTMLProcessor.unknown_starttag(self, tag, attrs) + +def _resolveRelativeURIs(htmlSource, baseURI, encoding): + if _debug: sys.stderr.write('entering _resolveRelativeURIs\n') + p = _RelativeURIResolver(baseURI, encoding) + p.feed(htmlSource) + return p.output() + +class _HTMLSanitizer(_BaseHTMLProcessor): + acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'b', 'big', + 'blockquote', 'br', 'button', 'caption', 'center', 'cite', 'code', 'col', + 'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt', 'em', 'fieldset', + 'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', + 'ins', 'kbd', 'label', 'legend', 'li', 'map', 'menu', 'ol', 'optgroup', + 'option', 'p', 'pre', 'q', 's', 'samp', 'select', 'small', 'span', 'strike', + 'strong', 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'tfoot', 'th', + 'thead', 'tr', 'tt', 'u', 'ul', 'var'] + + acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey', + 'action', 'align', 'alt', 'axis', 'border', 'cellpadding', 'cellspacing', + 'char', 'charoff', 'charset', 'checked', 'cite', 'class', 'clear', 'cols', + 'colspan', 'color', 'compact', 'coords', 'datetime', 'dir', 'disabled', + 'enctype', 'for', 'frame', 'headers', 'height', 'href', 'hreflang', 'hspace', + 'id', 'ismap', 'label', 'lang', 'longdesc', 'maxlength', 'media', 'method', + 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'prompt', 'readonly', + 'rel', 'rev', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', + 'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title', 'type', + 'usemap', 'valign', 'value', 'vspace', 'width', 'xml:lang'] + + unacceptable_elements_with_end_tag = ['script', 'applet'] + + def reset(self): + _BaseHTMLProcessor.reset(self) + self.unacceptablestack = 0 + + def unknown_starttag(self, tag, attrs): + if not tag in self.acceptable_elements: + if tag in self.unacceptable_elements_with_end_tag: + self.unacceptablestack += 1 + return + attrs = self.normalize_attrs(attrs) + attrs = [(key, value) for key, value in attrs if key in self.acceptable_attributes] + _BaseHTMLProcessor.unknown_starttag(self, tag, attrs) + + def unknown_endtag(self, tag): + if not tag in self.acceptable_elements: + if tag in self.unacceptable_elements_with_end_tag: + self.unacceptablestack -= 1 + return + _BaseHTMLProcessor.unknown_endtag(self, tag) + + def handle_pi(self, text): + pass + + def handle_decl(self, text): + pass + + def handle_data(self, text): + if not self.unacceptablestack: + _BaseHTMLProcessor.handle_data(self, text) + +def _sanitizeHTML(htmlSource, encoding): + p = _HTMLSanitizer(encoding) + p.feed(htmlSource) + data = p.output() + if TIDY_MARKUP: + # loop through list of preferred Tidy interfaces looking for one that's installed, + # then set up a common _tidy function to wrap the interface-specific API. 
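+    # An illustrative aside, not part of the original source: the loop below
+    # uses the same try-each-candidate import pattern as this isolated
+    # sketch (module names here are hypothetical). Disabled so it never runs:
+    if 0:
+        _backend = None
+        for _name in ('fastjson', 'json'):
+            try:
+                _backend = __import__(_name)
+                break
+            except ImportError:
+                continue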
+ _tidy = None + for tidy_interface in PREFERRED_TIDY_INTERFACES: + try: + if tidy_interface == "uTidy": + from tidy import parseString as _utidy + def _tidy(data, **kwargs): + return str(_utidy(data, **kwargs)) + break + elif tidy_interface == "mxTidy": + from mx.Tidy import Tidy as _mxtidy + def _tidy(data, **kwargs): + nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs) + return data + break + except: + pass + if _tidy: + utf8 = type(data) == type(u'') + if utf8: + data = data.encode('utf-8') + data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8") + if utf8: + data = unicode(data, 'utf-8') + if data.count(''): + data = data.split('>', 1)[1] + if data.count('= '2.3.3' + assert base64 != None + user, passw = base64.decodestring(req.headers['Authorization'].split(' ')[1]).split(':') + realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0] + self.add_password(realm, host, user, passw) + retry = self.http_error_auth_reqed('www-authenticate', host, req, headers) + self.reset_retry_count() + return retry + except: + return self.http_error_default(req, fp, code, msg, headers) + +def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers): + """URL, filename, or string --> stream + + This function lets you define parsers that take any input source + (URL, pathname to local or network file, or actual data as a string) + and deal with it in a uniform manner. Returned object is guaranteed + to have all the basic stdio read methods (read, readline, readlines). + Just .close() the object when you're done with it. + + If the etag argument is supplied, it will be used as the value of an + If-None-Match request header. + + If the modified argument is supplied, it must be a tuple of 9 integers + as returned by gmtime() in the standard Python time module. This MUST + be in GMT (Greenwich Mean Time). The formatted date/time will be used + as the value of an If-Modified-Since request header. + + If the agent argument is supplied, it will be used as the value of a + User-Agent request header. + + If the referrer argument is supplied, it will be used as the value of a + Referer[sic] request header. + + If handlers is supplied, it is a list of handlers used to build a + urllib2 opener. + """ + + if hasattr(url_file_stream_or_string, 'read'): + return url_file_stream_or_string + + if url_file_stream_or_string == '-': + return sys.stdin + + if urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp'): + if not agent: + agent = USER_AGENT + # test for inline user:password for basic auth + auth = None + if base64: + urltype, rest = urllib.splittype(url_file_stream_or_string) + realhost, rest = urllib.splithost(rest) + if realhost: + user_passwd, realhost = urllib.splituser(realhost) + if user_passwd: + url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest) + auth = base64.encodestring(user_passwd).strip() + # try to open with urllib2 (to use optional headers) + request = urllib2.Request(url_file_stream_or_string) + request.add_header('User-Agent', agent) + if etag: + request.add_header('If-None-Match', etag) + if modified: + # format into an RFC 1123-compliant timestamp. We can't use + # time.strftime() since the %a and %b directives can be affected + # by the current locale, but RFC 2616 states that dates must be + # in English. 
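+            # An illustrative aside, not part of the original source: the
+            # If-Modified-Since header built just below is what makes
+            # conditional GET work. A minimal caller-side sketch with a
+            # hypothetical feed URL (parse() is defined at the end of this
+            # module). Disabled so it never runs:
+            if 0:
+                d = parse('http://example.org/feed.xml')
+                # hand the validators back on the next poll; an unchanged
+                # feed answers 304 with no body
+                d2 = parse('http://example.org/feed.xml',
+                           etag=d.get('etag'), modified=d.get('modified'))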
+            short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
+            months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
+            request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5]))
+        if referrer:
+            request.add_header('Referer', referrer)
+        if gzip and zlib:
+            request.add_header('Accept-encoding', 'gzip, deflate')
+        elif gzip:
+            request.add_header('Accept-encoding', 'gzip')
+        elif zlib:
+            request.add_header('Accept-encoding', 'deflate')
+        else:
+            request.add_header('Accept-encoding', '')
+        if auth:
+            request.add_header('Authorization', 'Basic %s' % auth)
+        if ACCEPT_HEADER:
+            request.add_header('Accept', ACCEPT_HEADER)
+        request.add_header('A-IM', 'feed') # RFC 3229 support
+        opener = apply(urllib2.build_opener, tuple([_FeedURLHandler()] + handlers))
+        opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
+        try:
+            return opener.open(request)
+        finally:
+            opener.close() # JohnD
+
+    # try to open with native open function (if url_file_stream_or_string is a filename)
+    try:
+        return open(url_file_stream_or_string)
+    except:
+        pass
+
+    # treat url_file_stream_or_string as string
+    return _StringIO(str(url_file_stream_or_string))
+
+_date_handlers = []
+def registerDateHandler(func):
+    '''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
+    _date_handlers.insert(0, func)
+
+# ISO-8601 date parsing routines written by Fazal Majid.
+# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
+# parser is beyond the scope of feedparser and would be a worthwhile addition
+# to the Python library.
+# A single regular expression cannot parse ISO 8601 date formats into groups
+# as the standard is highly irregular (for instance is 030104 2003-01-04 or
+# 0301-04-01), so we use templates instead.
+# Please note the order in templates is significant because we need a
+# greedy match.
+_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-MM', 'YYYY-?OOO',
+                 'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
+                 '-YY-?MM', '-OOO', '-YY',
+                 '--MM-?DD', '--MM',
+                 '---DD',
+                 'CC', '']
+_iso8601_re = [
+    tmpl.replace(
+    'YYYY', r'(?P<year>\d{4})').replace(
+    'YY', r'(?P<year>\d\d)').replace(
+    'MM', r'(?P<month>[01]\d)').replace(
+    'DD', r'(?P<day>[0123]\d)').replace(
+    'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
+    'CC', r'(?P<century>\d\d$)')
+    + r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
+    + r'(:(?P<second>\d{2}))?'
+    + r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
+    for tmpl in _iso8601_tmpl]
+del tmpl
+_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
+del regex
+def _parse_date_iso8601(dateString):
+    '''Parse a variety of ISO-8601-compatible formats like 20040105'''
+    m = None
+    for _iso8601_match in _iso8601_matches:
+        m = _iso8601_match(dateString)
+        if m: break
+    if not m: return
+    if m.span() == (0, 0): return
+    params = m.groupdict()
+    ordinal = params.get('ordinal', 0)
+    if ordinal:
+        ordinal = int(ordinal)
+    else:
+        ordinal = 0
+    year = params.get('year', '--')
+    if not year or year == '--':
+        year = time.gmtime()[0]
+    elif len(year) == 2:
+        # ISO 8601 assumes current century, i.e.
93 -> 2093, NOT 1993 + year = 100 * int(time.gmtime()[0] / 100) + int(year) + else: + year = int(year) + month = params.get('month', '-') + if not month or month == '-': + # ordinals are NOT normalized by mktime, we simulate them + # by setting month=1, day=ordinal + if ordinal: + month = 1 + else: + month = time.gmtime()[1] + month = int(month) + day = params.get('day', 0) + if not day: + # see above + if ordinal: + day = ordinal + elif params.get('century', 0) or \ + params.get('year', 0) or params.get('month', 0): + day = 1 + else: + day = time.gmtime()[2] + else: + day = int(day) + # special case of the century - is the first year of the 21st century + # 2000 or 2001 ? The debate goes on... + if 'century' in params.keys(): + year = (int(params['century']) - 1) * 100 + 1 + # in ISO 8601 most fields are optional + for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']: + if not params.get(field, None): + params[field] = 0 + hour = int(params.get('hour', 0)) + minute = int(params.get('minute', 0)) + second = int(params.get('second', 0)) + # weekday is normalized by mktime(), we can ignore it + weekday = 0 + # daylight savings is complex, but not needed for feedparser's purposes + # as time zones, if specified, include mention of whether it is active + # (e.g. PST vs. PDT, CET). Using -1 is implementation-dependent and + # and most implementations have DST bugs + daylight_savings_flag = 0 + tm = [year, month, day, hour, minute, second, weekday, + ordinal, daylight_savings_flag] + # ISO 8601 time zone adjustments + tz = params.get('tz') + if tz and tz != 'Z': + if tz[0] == '-': + tm[3] += int(params.get('tzhour', 0)) + tm[4] += int(params.get('tzmin', 0)) + elif tz[0] == '+': + tm[3] -= int(params.get('tzhour', 0)) + tm[4] -= int(params.get('tzmin', 0)) + else: + return None + # Python's time.mktime() is a wrapper around the ANSI C mktime(3c) + # which is guaranteed to normalize d/m/y/h/m/s. + # Many implementations have bugs, but we'll pretend they don't. + return time.localtime(time.mktime(tm)) +registerDateHandler(_parse_date_iso8601) + +# 8-bit date handling routines written by ytrewq1. 
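+# An illustrative aside, not part of the original source: callers can extend
+# date parsing through registerDateHandler above. A minimal sketch for a
+# hypothetical 'YYYY.MM.DD' format; a handler returns a 9-tuple in GMT or
+# None to pass. The 8-bit Korean handlers below are registered the same way.
+# Disabled so it never runs:
+if 0:
+    def _parse_date_dotted(dateString):
+        try:
+            return time.strptime(dateString, '%Y.%m.%d')
+        except ValueError:
+            return None
+    registerDateHandler(_parse_date_dotted)
+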
+_korean_year = u'\ub144' # b3e2 in euc-kr +_korean_month = u'\uc6d4' # bff9 in euc-kr +_korean_day = u'\uc77c' # c0cf in euc-kr +_korean_am = u'\uc624\uc804' # bfc0 c0fc in euc-kr +_korean_pm = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr + +_korean_onblog_date_re = \ + re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \ + (_korean_year, _korean_month, _korean_day)) +_korean_nate_date_re = \ + re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \ + (_korean_am, _korean_pm)) +def _parse_date_onblog(dateString): + '''Parse a string according to the OnBlog 8-bit date format''' + m = _korean_onblog_date_re.match(dateString) + if not m: return + w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \ + {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\ + 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\ + 'zonediff': '+09:00'} + if _debug: sys.stderr.write('OnBlog date parsed as: %s\n' % w3dtfdate) + return _parse_date_w3dtf(w3dtfdate) +registerDateHandler(_parse_date_onblog) + +def _parse_date_nate(dateString): + '''Parse a string according to the Nate 8-bit date format''' + m = _korean_nate_date_re.match(dateString) + if not m: return + hour = int(m.group(5)) + ampm = m.group(4) + if (ampm == _korean_pm): + hour += 12 + hour = str(hour) + if len(hour) == 1: + hour = '0' + hour + w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \ + {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\ + 'hour': hour, 'minute': m.group(6), 'second': m.group(7),\ + 'zonediff': '+09:00'} + if _debug: sys.stderr.write('Nate date parsed as: %s\n' % w3dtfdate) + return _parse_date_w3dtf(w3dtfdate) +registerDateHandler(_parse_date_nate) + +_mssql_date_re = \ + re.compile('(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})(\.\d+)?') +def _parse_date_mssql(dateString): + '''Parse a string according to the MS SQL date format''' + m = _mssql_date_re.match(dateString) + if not m: return + w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \ + {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\ + 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\ + 'zonediff': '+09:00'} + if _debug: sys.stderr.write('MS SQL date parsed as: %s\n' % w3dtfdate) + return _parse_date_w3dtf(w3dtfdate) +registerDateHandler(_parse_date_mssql) + +# Unicode strings for Greek date strings +_greek_months = \ + { \ + u'\u0399\u03b1\u03bd': u'Jan', # c9e1ed in iso-8859-7 + u'\u03a6\u03b5\u03b2': u'Feb', # d6e5e2 in iso-8859-7 + u'\u039c\u03ac\u03ce': u'Mar', # ccdcfe in iso-8859-7 + u'\u039c\u03b1\u03ce': u'Mar', # cce1fe in iso-8859-7 + u'\u0391\u03c0\u03c1': u'Apr', # c1f0f1 in iso-8859-7 + u'\u039c\u03ac\u03b9': u'May', # ccdce9 in iso-8859-7 + u'\u039c\u03b1\u03ca': u'May', # cce1fa in iso-8859-7 + u'\u039c\u03b1\u03b9': u'May', # cce1e9 in iso-8859-7 + u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7 + u'\u0399\u03bf\u03bd': u'Jun', # c9efed in iso-8859-7 + u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7 + u'\u0399\u03bf\u03bb': u'Jul', # c9f9eb in iso-8859-7 + u'\u0391\u03cd\u03b3': u'Aug', # c1fde3 in iso-8859-7 + u'\u0391\u03c5\u03b3': u'Aug', # c1f5e3 in iso-8859-7 + u'\u03a3\u03b5\u03c0': u'Sep', # d3e5f0 in iso-8859-7 + u'\u039f\u03ba\u03c4': u'Oct', # cfeaf4 in iso-8859-7 + u'\u039d\u03bf\u03ad': u'Nov', # cdefdd in iso-8859-7 + u'\u039d\u03bf\u03b5': u'Nov', # cdefe5 in iso-8859-7 + u'\u0394\u03b5\u03ba': 
u'Dec', # c4e5ea in iso-8859-7 + } + +_greek_wdays = \ + { \ + u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7 + u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7 + u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7 + u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7 + u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7 + u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7 + u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7 + } + +_greek_date_format_re = \ + re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)') + +def _parse_date_greek(dateString): + '''Parse a string according to a Greek 8-bit date format.''' + m = _greek_date_format_re.match(dateString) + if not m: return + try: + wday = _greek_wdays[m.group(1)] + month = _greek_months[m.group(3)] + except: + return + rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \ + {'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\ + 'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\ + 'zonediff': m.group(8)} + if _debug: sys.stderr.write('Greek date parsed as: %s\n' % rfc822date) + return _parse_date_rfc822(rfc822date) +registerDateHandler(_parse_date_greek) + +# Unicode strings for Hungarian date strings +_hungarian_months = \ + { \ + u'janu\u00e1r': u'01', # e1 in iso-8859-2 + u'febru\u00e1ri': u'02', # e1 in iso-8859-2 + u'm\u00e1rcius': u'03', # e1 in iso-8859-2 + u'\u00e1prilis': u'04', # e1 in iso-8859-2 + u'm\u00e1ujus': u'05', # e1 in iso-8859-2 + u'j\u00fanius': u'06', # fa in iso-8859-2 + u'j\u00falius': u'07', # fa in iso-8859-2 + u'augusztus': u'08', + u'szeptember': u'09', + u'okt\u00f3ber': u'10', # f3 in iso-8859-2 + u'november': u'11', + u'december': u'12', + } + +_hungarian_date_format_re = \ + re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))') + +def _parse_date_hungarian(dateString): + '''Parse a string according to a Hungarian 8-bit date format.''' + m = _hungarian_date_format_re.match(dateString) + if not m: return + try: + month = _hungarian_months[m.group(2)] + day = m.group(3) + if len(day) == 1: + day = '0' + day + hour = m.group(4) + if len(hour) == 1: + hour = '0' + hour + except: + return + w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \ + {'year': m.group(1), 'month': month, 'day': day,\ + 'hour': hour, 'minute': m.group(5),\ + 'zonediff': m.group(6)} + if _debug: sys.stderr.write('Hungarian date parsed as: %s\n' % w3dtfdate) + return _parse_date_w3dtf(w3dtfdate) +registerDateHandler(_parse_date_hungarian) + +# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by +# Drake and licensed under the Python license. 
Removed all range checking
+# for month, day, hour, minute, and second, since mktime will normalize
+# these later
+def _parse_date_w3dtf(dateString):
+    def __extract_date(m):
+        year = int(m.group('year'))
+        if year < 100:
+            year = 100 * int(time.gmtime()[0] / 100) + int(year)
+        if year < 1000:
+            return 0, 0, 0
+        julian = m.group('julian')
+        if julian:
+            julian = int(julian)
+            month = julian / 30 + 1
+            day = julian % 30 + 1
+            jday = None
+            while jday != julian:
+                t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
+                jday = time.gmtime(t)[-2]
+                diff = abs(jday - julian)
+                if jday > julian:
+                    if diff < day:
+                        day = day - diff
+                    else:
+                        month = month - 1
+                        day = 31
+                elif jday < julian:
+                    if day + diff < 28:
+                        day = day + diff
+                    else:
+                        month = month + 1
+            return year, month, day
+        month = m.group('month')
+        day = 1
+        if month is None:
+            month = 1
+        else:
+            month = int(month)
+            day = m.group('day')
+            if day:
+                day = int(day)
+            else:
+                day = 1
+        return year, month, day
+
+    def __extract_time(m):
+        if not m:
+            return 0, 0, 0
+        hours = m.group('hours')
+        if not hours:
+            return 0, 0, 0
+        hours = int(hours)
+        minutes = int(m.group('minutes'))
+        seconds = m.group('seconds')
+        if seconds:
+            seconds = int(seconds)
+        else:
+            seconds = 0
+        return hours, minutes, seconds
+
+    def __extract_tzd(m):
+        '''Return the Time Zone Designator as an offset in seconds from UTC.'''
+        if not m:
+            return 0
+        tzd = m.group('tzd')
+        if not tzd:
+            return 0
+        if tzd == 'Z':
+            return 0
+        hours = int(m.group('tzdhours'))
+        minutes = m.group('tzdminutes')
+        if minutes:
+            minutes = int(minutes)
+        else:
+            minutes = 0
+        offset = (hours*60 + minutes) * 60
+        if tzd[0] == '+':
+            return -offset
+        return offset
+
+    __date_re = ('(?P<year>\d\d\d\d)'
+                 '(?:(?P<dsep>-|)'
+                 '(?:(?P<julian>\d\d\d)'
+                 '|(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?))?')
+    __tzd_re = '(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)'
+    __tzd_rx = re.compile(__tzd_re)
+    __time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
+                 '(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?'
+                 + __tzd_re)
+    __datetime_re = '%s(?:T%s)?' % (__date_re, __time_re)
+    __datetime_rx = re.compile(__datetime_re)
+    m = __datetime_rx.match(dateString)
+    if (m is None) or (m.group() != dateString): return
+    gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
+    if gmt[0] == 0: return
+    return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone)
+registerDateHandler(_parse_date_w3dtf)
+
+def _parse_date_rfc822(dateString):
+    '''Parse an RFC822, RFC1123, RFC2822, or asctime-style date'''
+    data = dateString.split()
+    if data[0][-1] in (',', '.') or data[0].lower() in rfc822._daynames:
+        del data[0]
+    if len(data) == 4:
+        s = data[3]
+        i = s.find('+')
+        if i > 0:
+            data[3:] = [s[:i], s[i+1:]]
+        else:
+            data.append('')
+        dateString = " ".join(data)
+    if len(data) < 5:
+        dateString += ' 00:00:00 GMT'
+    tm = rfc822.parsedate_tz(dateString)
+    if tm:
+        return time.gmtime(rfc822.mktime_tz(tm))
+# rfc822.py defines several time zones, but we define some extra ones.
+# 'ET' is equivalent to 'EST', etc.
+_additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800} +rfc822._timezones.update(_additional_timezones) +registerDateHandler(_parse_date_rfc822) + +def _parse_date(dateString): + '''Parses a variety of date formats into a 9-tuple in GMT''' + for handler in _date_handlers: + try: + date9tuple = handler(dateString) + if not date9tuple: continue + if len(date9tuple) != 9: + if _debug: sys.stderr.write('date handler function must return 9-tuple\n') + raise ValueError + map(int, date9tuple) + return date9tuple + except Exception, e: + if _debug: sys.stderr.write('%s raised %s\n' % (handler.__name__, repr(e))) + pass + return None + +def _getCharacterEncoding(http_headers, xml_data): + '''Get the character encoding of the XML document + + http_headers is a dictionary + xml_data is a raw string (not Unicode) + + This is so much trickier than it sounds, it's not even funny. + According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type + is application/xml, application/*+xml, + application/xml-external-parsed-entity, or application/xml-dtd, + the encoding given in the charset parameter of the HTTP Content-Type + takes precedence over the encoding given in the XML prefix within the + document, and defaults to 'utf-8' if neither are specified. But, if + the HTTP Content-Type is text/xml, text/*+xml, or + text/xml-external-parsed-entity, the encoding given in the XML prefix + within the document is ALWAYS IGNORED and only the encoding given in + the charset parameter of the HTTP Content-Type header should be + respected, and it defaults to 'us-ascii' if not specified. + + Furthermore, discussion on the atom-syntax mailing list with the + author of RFC 3023 leads me to the conclusion that any document + served with a Content-Type of text/* and no charset parameter + must be treated as us-ascii. (We now do this.) And also that it + must always be flagged as non-well-formed. (We now do this too.) + + If Content-Type is unspecified (input was local file or non-HTTP source) + or unrecognized (server just got it totally wrong), then go by the + encoding given in the XML prefix of the document and default to + 'iso-8859-1' as per the HTTP specification (RFC 2616). + + Then, assuming we didn't find a character encoding in the HTTP headers + (and the HTTP Content-type allowed us to look in the body), we need + to sniff the first few bytes of the XML data and try to determine + whether the encoding is ASCII-compatible. Section F of the XML + specification shows the way here: + http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info + + If the sniffed encoding is not ASCII-compatible, we need to make it + ASCII compatible so that we can sniff further into the XML declaration + to find the encoding attribute, which will tell us the true encoding. + + Of course, none of this guarantees that we will be able to parse the + feed in the declared character encoding (assuming it was declared + correctly, which many are not). CJKCodecs and iconv_codec help a lot; + you should definitely install them if you can. 
+ http://cjkpython.i18n.org/ + ''' + + def _parseHTTPContentType(content_type): + '''takes HTTP Content-Type header and returns (content type, charset) + + If no charset is specified, returns (content type, '') + If no content type is specified, returns ('', '') + Both return parameters are guaranteed to be lowercase strings + ''' + content_type = content_type or '' + content_type, params = cgi.parse_header(content_type) + return content_type, params.get('charset', '').replace("'", '') + + sniffed_xml_encoding = '' + xml_encoding = '' + true_encoding = '' + http_content_type, http_encoding = _parseHTTPContentType(http_headers.get('content-type')) + # Must sniff for non-ASCII-compatible character encodings before + # searching for XML declaration. This heuristic is defined in + # section F of the XML specification: + # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info + try: + if xml_data[:4] == '\x4c\x6f\xa7\x94': + # EBCDIC + xml_data = _ebcdic_to_ascii(xml_data) + elif xml_data[:4] == '\x00\x3c\x00\x3f': + # UTF-16BE + sniffed_xml_encoding = 'utf-16be' + xml_data = unicode(xml_data, 'utf-16be').encode('utf-8') + elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') and (xml_data[2:4] != '\x00\x00'): + # UTF-16BE with BOM + sniffed_xml_encoding = 'utf-16be' + xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8') + elif xml_data[:4] == '\x3c\x00\x3f\x00': + # UTF-16LE + sniffed_xml_encoding = 'utf-16le' + xml_data = unicode(xml_data, 'utf-16le').encode('utf-8') + elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and (xml_data[2:4] != '\x00\x00'): + # UTF-16LE with BOM + sniffed_xml_encoding = 'utf-16le' + xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8') + elif xml_data[:4] == '\x00\x00\x00\x3c': + # UTF-32BE + sniffed_xml_encoding = 'utf-32be' + xml_data = unicode(xml_data, 'utf-32be').encode('utf-8') + elif xml_data[:4] == '\x3c\x00\x00\x00': + # UTF-32LE + sniffed_xml_encoding = 'utf-32le' + xml_data = unicode(xml_data, 'utf-32le').encode('utf-8') + elif xml_data[:4] == '\x00\x00\xfe\xff': + # UTF-32BE with BOM + sniffed_xml_encoding = 'utf-32be' + xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8') + elif xml_data[:4] == '\xff\xfe\x00\x00': + # UTF-32LE with BOM + sniffed_xml_encoding = 'utf-32le' + xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8') + elif xml_data[:3] == '\xef\xbb\xbf': + # UTF-8 with BOM + sniffed_xml_encoding = 'utf-8' + xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8') + else: + # ASCII-compatible + pass + xml_encoding_match = re.compile('^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data) + except: + xml_encoding_match = None + if xml_encoding_match: + xml_encoding = xml_encoding_match.groups()[0].lower() + if sniffed_xml_encoding and (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')): + xml_encoding = sniffed_xml_encoding + acceptable_content_type = 0 + application_content_types = ('application/xml', 'application/xml-dtd', 'application/xml-external-parsed-entity') + text_content_types = ('text/xml', 'text/xml-external-parsed-entity') + if (http_content_type in application_content_types) or \ + (http_content_type.startswith('application/') and http_content_type.endswith('+xml')): + acceptable_content_type = 1 + true_encoding = http_encoding or xml_encoding or 'utf-8' + elif (http_content_type in text_content_types) or \ + (http_content_type.startswith('text/')) and 
http_content_type.endswith('+xml'):
+        acceptable_content_type = 1
+        true_encoding = http_encoding or 'us-ascii'
+    elif http_content_type.startswith('text/'):
+        true_encoding = http_encoding or 'us-ascii'
+    elif http_headers and (not http_headers.has_key('content-type')):
+        true_encoding = xml_encoding or 'iso-8859-1'
+    else:
+        true_encoding = xml_encoding or 'utf-8'
+    return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type
+
+def _toUTF8(data, encoding):
+    '''Changes an XML data stream on the fly to specify a new encoding
+
+    data is a raw sequence of bytes (not Unicode) that is presumed to be in %encoding already
+    encoding is a string recognized by encodings.aliases
+    '''
+    if _debug: sys.stderr.write('entering _toUTF8, trying encoding %s\n' % encoding)
+    # strip Byte Order Mark (if present)
+    if (len(data) >= 4) and (data[:2] == '\xfe\xff') and (data[2:4] != '\x00\x00'):
+        if _debug:
+            sys.stderr.write('stripping BOM\n')
+            if encoding != 'utf-16be':
+                sys.stderr.write('trying utf-16be instead\n')
+        encoding = 'utf-16be'
+        data = data[2:]
+    elif (len(data) >= 4) and (data[:2] == '\xff\xfe') and (data[2:4] != '\x00\x00'):
+        if _debug:
+            sys.stderr.write('stripping BOM\n')
+            if encoding != 'utf-16le':
+                sys.stderr.write('trying utf-16le instead\n')
+        encoding = 'utf-16le'
+        data = data[2:]
+    elif data[:3] == '\xef\xbb\xbf':
+        if _debug:
+            sys.stderr.write('stripping BOM\n')
+            if encoding != 'utf-8':
+                sys.stderr.write('trying utf-8 instead\n')
+        encoding = 'utf-8'
+        data = data[3:]
+    elif data[:4] == '\x00\x00\xfe\xff':
+        if _debug:
+            sys.stderr.write('stripping BOM\n')
+            if encoding != 'utf-32be':
+                sys.stderr.write('trying utf-32be instead\n')
+        encoding = 'utf-32be'
+        data = data[4:]
+    elif data[:4] == '\xff\xfe\x00\x00':
+        if _debug:
+            sys.stderr.write('stripping BOM\n')
+            if encoding != 'utf-32le':
+                sys.stderr.write('trying utf-32le instead\n')
+        encoding = 'utf-32le'
+        data = data[4:]
+    newdata = unicode(data, encoding)
+    if _debug: sys.stderr.write('successfully converted %s data to unicode\n' % encoding)
+    declmatch = re.compile('^<\?xml[^>]*?>')
+    newdecl = '''<?xml version='1.0' encoding='utf-8'?>'''
+    if declmatch.search(newdata):
+        newdata = declmatch.sub(newdecl, newdata)
+    else:
+        newdata = newdecl + u'\n' + newdata
+    return newdata.encode('utf-8')
+
+def _stripDoctype(data):
+    '''Strips DOCTYPE from XML document, returns (rss_version, stripped_data)
+
+    rss_version may be 'rss091n' or None
+    stripped_data is the same XML document, minus the DOCTYPE
+    '''
+    entity_pattern = re.compile(r'<!ENTITY([^>]*?)>', re.MULTILINE)
+    data = entity_pattern.sub('', data)
+    doctype_pattern = re.compile(r'<!DOCTYPE([^>]*?)>', re.MULTILINE)
+    doctype_results = doctype_pattern.findall(data)
+    doctype = doctype_results and doctype_results[0] or ''
+    if doctype.lower().count('netscape'):
+        version = 'rss091n'
+    else:
+        version = None
+    data = doctype_pattern.sub('', data)
+    return version, data
+
+def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=[]):
+    '''Parse a feed from a URL, file, stream, or string'''
+    result = FeedParserDict()
+    result['feed'] = FeedParserDict()
+    result['entries'] = []
+    if _XML_AVAILABLE:
+        result['bozo'] = 0
+    if type(handlers) == types.InstanceType:
+        handlers = [handlers]
+    try:
+        f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers)
+        data = f.read()
+    except Exception, e:
+        result['bozo'] = 1
+        result['bozo_exception'] = e
+        data = ''
+        f = None
+
+    # if feed is gzip-compressed, decompress it
+    if f
and data and hasattr(f, 'headers'):
+        if gzip and f.headers.get('content-encoding', '') == 'gzip':
+            try:
+                data = gzip.GzipFile(fileobj=_StringIO(data)).read()
+            except Exception, e:
+                # Some feeds claim to be gzipped but they're not, so
+                # we get garbage.  Ideally, we should re-request the
+                # feed without the 'Accept-encoding: gzip' header,
+                # but we don't.
+                result['bozo'] = 1
+                result['bozo_exception'] = e
+                data = ''
+        elif zlib and f.headers.get('content-encoding', '') == 'deflate':
+            try:
+                data = zlib.decompress(data, -zlib.MAX_WBITS)
+            except Exception, e:
+                result['bozo'] = 1
+                result['bozo_exception'] = e
+                data = ''
+
+    # save HTTP headers
+    if hasattr(f, 'info'):
+        info = f.info()
+        result['etag'] = info.getheader('ETag')
+        last_modified = info.getheader('Last-Modified')
+        if last_modified:
+            result['modified'] = _parse_date(last_modified)
+    if hasattr(f, 'url'):
+        result['href'] = f.url
+        result['status'] = 200
+    if hasattr(f, 'status'):
+        result['status'] = f.status
+    if hasattr(f, 'headers'):
+        result['headers'] = f.headers.dict
+    if hasattr(f, 'close'):
+        f.close()
+
+    # there are four encodings to keep track of:
+    # - http_encoding is the encoding declared in the Content-Type HTTP header
+    # - xml_encoding is the encoding declared in the <?xml declaration
+    # - sniffed_xml_encoding is the encoding sniffed from the first 4 bytes of the XML data
+    # - result['encoding'] is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
+#2.5 - 7/25/2003 - MAP - changed to Python license (all contributors agree);
+# removed unnecessary urllib code -- urllib2 should always be available anyway;
+# return actual url, status, and full HTTP headers (as result['url'],
+# result['status'], and result['headers']) if parsing a remote feed over HTTP --
+# this should pass all the HTTP tests at ;
+# added the latest namespace-of-the-week for RSS 2.0
+#2.5.1 - 7/26/2003 - RMK - clear opener.addheaders so we only send our custom
+# User-Agent (otherwise urllib2 sends two, which confuses some servers)
+#2.5.2 - 7/28/2003 - MAP - entity-decode inline xml properly; added support for
+# inline and as used in some RSS 2.0 feeds
+#2.5.3 - 8/6/2003 - TvdV - patch to track whether we're inside an image or
+# textInput, and also to return the character encoding (if specified)
+#2.6 - 1/1/2004 - MAP - dc:author support (MarekK); fixed bug tracking
+# nested divs within content (JohnD); fixed missing sys import (JohanS);
+# fixed regular expression to capture XML character encoding (Andrei);
+# added support for Atom 0.3-style links; fixed bug with textInput tracking;
+# added support for cloud (MartijnP); added support for multiple
+# category/dc:subject (MartijnP); normalize content model: 'description' gets
+# description (which can come from description, summary, or full content if no
+# description), 'content' gets dict of base/language/type/value (which can come
+# from content:encoded, xhtml:body, content, or fullitem);
+# fixed bug matching arbitrary Userland namespaces; added xml:base and xml:lang
+# tracking; fixed bug tracking unknown tags; fixed bug tracking content when
+# element is not in default namespace (like Pocketsoap feed);
+# resolve relative URLs in link, guid, docs, url, comments, wfw:comment,
+# wfw:commentRSS; resolve relative URLs within embedded HTML markup in
+# description, xhtml:body, content, content:encoded, title, subtitle,
+# summary, info, tagline, and copyright; added support for pingback and
+# trackback namespaces
#2.7 - 1/5/2004 - MAP - really added support for trackback and pingback
+# namespaces, as opposed to 2.6 when I said I did but didn't really;
+# sanitize HTML markup within some elements; added mxTidy support (if
+# installed) to tidy HTML markup within some elements; fixed indentation
+# bug in _parse_date (FazalM); use socket.setdefaulttimeout if available
+# (FazalM); universal date parsing and normalization (FazalM): 'created',
+# 'modified', 'issued' are parsed into 9-tuple date format and stored in
+# 'created_parsed', 'modified_parsed', and 'issued_parsed'; 'date' is
+# duplicated in 'modified' and vice-versa; 'date_parsed' is duplicated in
+# 'modified_parsed' and vice-versa
+#2.7.1 - 1/9/2004 - MAP - fixed bug handling &quot; and &apos;. fixed memory
+# leak not closing url opener (JohnD); added dc:publisher support (MarekK);
+# added admin:errorReportsTo support (MarekK); Python 2.1 dict support (MarekK)
+#2.7.4 - 1/14/2004 - MAP - added workaround for improperly formed <br/> tags in
+# encoded HTML (skadz); fixed unicode handling in normalize_attrs (ChrisL);
+# fixed relative URI processing for guid (skadz); added ICBM support; added
+# base64 support
+#2.7.5 - 1/15/2004 - MAP - added workaround for malformed DOCTYPE (seen on many
+# blogspot.com sites); added _debug variable
+#2.7.6 - 1/16/2004 - MAP - fixed bug with StringIO importing
+#3.0b3 - 1/23/2004 - MAP - parse entire feed with real XML parser (if available);
+# added several new supported namespaces; fixed bug tracking naked markup in
+# description; added support for enclosure; added support for source; re-added
+# support for cloud which got dropped somehow; added support for expirationDate
+#3.0b4 - 1/26/2004 - MAP - fixed xml:lang inheritance; fixed multiple bugs tracking
+# xml:base URI, one for documents that don't define one explicitly and one for
+# documents that define an outer and an inner xml:base that goes out of scope
+# before the end of the document
+#3.0b5 - 1/26/2004 - MAP - fixed bug parsing multiple links at feed level
+#3.0b6 - 1/27/2004 - MAP - added feed type and version detection, result['version']
+# will be one of SUPPORTED_VERSIONS.keys() or empty string if unrecognized;
+# added support for creativeCommons:license and cc:license; added support for
+# full Atom content model in title, tagline, info, copyright, summary; fixed bug
+# with gzip encoding (not always telling server we support it when we do)
+#3.0b7 - 1/28/2004 - MAP - support Atom-style author element in author_detail
+# (dictionary of 'name', 'url', 'email'); map author to author_detail if author
+# contains name + email address
+#3.0b8 - 1/28/2004 - MAP - added support for contributor
+#3.0b9 - 1/29/2004 - MAP - fixed check for presence of dict function; added
+# support for summary
+#3.0b10 - 1/31/2004 - MAP - incorporated ISO-8601 date parsing routines from
+# xml.util.iso8601
+#3.0b11 - 2/2/2004 - MAP - added 'rights' to list of elements that can contain
+# dangerous markup; fiddled with decodeEntities (not right); liberalized
+# date parsing even further
+#3.0b12 - 2/6/2004 - MAP - fiddled with decodeEntities (still not right);
+# added support to Atom 0.2 subtitle; added support for Atom content model
+# in copyright; better sanitizing of dangerous HTML elements with end tags
+# (script, frameset)
+#3.0b13 - 2/8/2004 - MAP - better handling of empty HTML tags (br, hr, img,
+# etc.) in embedded markup, in either HTML or XHTML form (<br>, <br/>, <br />)
+#3.0b14 - 2/8/2004 - MAP - fixed CDATA handling in non-wellformed feeds under
+# Python 2.1
+#3.0b15 - 2/11/2004 - MAP - fixed bug resolving relative links in wfw:commentRSS;
+# fixed bug capturing author and contributor URL; fixed bug resolving relative
+# links in author and contributor URL; fixed bug resolving relative links in
+# generator URL; added support for recognizing RSS 1.0; passed Simon Fell's
+# namespace tests, and included them permanently in the test suite with his
+# permission; fixed namespace handling under Python 2.1
+#3.0b16 - 2/12/2004 - MAP - fixed support for RSS 0.90 (broken in b15)
+#3.0b17 - 2/13/2004 - MAP - determine character encoding as per RFC 3023
+#3.0b18 - 2/17/2004 - MAP - always map description to summary_detail (Andrei);
+# use libxml2 (if available)
+#3.0b19 - 3/15/2004 - MAP - fixed bug exploding author information when author
+# name was in parentheses; removed ultra-problematic mxTidy support; patch to
+# workaround crash in PyXML/expat when encountering invalid entities
+# (MarkMoraes); support for textinput/textInput
+#3.0b20 - 4/7/2004 - MAP - added CDF support
+#3.0b21 - 4/14/2004 - MAP - added Hot RSS support
+#3.0b22 - 4/19/2004 - MAP - changed 'channel' to 'feed', 'item' to 'entries' in
+# results dict; changed results dict to allow getting values with results.key
+# as well as results[key]; work around embedded illformed HTML with half
+# a DOCTYPE; work around malformed Content-Type header; if character encoding
+# is wrong, try several common ones before falling back to regexes (if this
+# works, bozo_exception is set to CharacterEncodingOverride); fixed character
+# encoding issues in BaseHTMLProcessor by tracking encoding and converting
+# from Unicode to raw strings before feeding data to sgmllib.SGMLParser;
+# convert each value in results to Unicode (if possible), even if using
+# regex-based parsing
+#3.0b23 - 4/21/2004 - MAP - fixed UnicodeDecodeError for feeds that contain
+# high-bit characters in attributes in embedded HTML in description (thanks
+# Thijs van de Vossen); moved guid, date, and date_parsed to mapped keys in
+# FeedParserDict; tweaked FeedParserDict.has_key to return True if asking
+# about a mapped key
+#3.0fc1 - 4/23/2004 - MAP - made results.entries[0].links[0] and
+# results.entries[0].enclosures[0] into FeedParserDict; fixed typo that could
+# cause the same encoding to be tried twice (even if it failed the first time);
+# fixed DOCTYPE stripping when DOCTYPE contained entity declarations;
+# better textinput and image tracking in illformed RSS 1.0 feeds
+#3.0fc2 - 5/10/2004 - MAP - added and passed Sam's amp tests; added and passed
+# my blink tag tests
+#3.0fc3 - 6/18/2004 - MAP - fixed bug in _changeEncodingDeclaration that
+# failed to parse utf-16 encoded feeds; made source into a FeedParserDict;
+# duplicate admin:generatorAgent/@rdf:resource in generator_detail.url;
+# added support for image; refactored parse() fallback logic to try other
+# encodings if SAX parsing fails (previously it would only try other encodings
+# if re-encoding failed); remove unichr madness in normalize_attrs now that
+# we're properly tracking encoding in and out of BaseHTMLProcessor; set
+# feed.language from root-level xml:lang; set entry.id from rdf:about;
+# send Accept header
+#3.0 - 6/21/2004 - MAP - don't try iso-8859-1 (can't distinguish between
+# iso-8859-1 and windows-1252 anyway, and most incorrectly marked feeds are
+# windows-1252); fixed regression that could cause the same encoding to be
+# tried twice (even if it failed the first time)
+#3.0.1 - 6/22/2004 - MAP - default to us-ascii for all text/* content types;
+# recover from malformed content-type header parameter with no equals sign
+# ('text/xml; charset:iso-8859-1')
+#3.1 - 6/28/2004 - MAP - added and passed tests for converting HTML entities
+# to Unicode equivalents in illformed feeds (aaronsw); added and
+# passed tests for converting character entities to Unicode equivalents
+# in illformed feeds (aaronsw); test for valid parsers when setting
+# XML_AVAILABLE; make version and encoding available when server returns
+# a 304; add handlers parameter to pass arbitrary urllib2 handlers (like
+# digest auth or proxy support); add code to parse username/password
+# out of url and send as basic authentication; expose downloading-related
+# exceptions in bozo_exception (aaronsw); added __contains__ method to
+# FeedParserDict (aaronsw); added publisher_detail (aaronsw)
+#3.2 - 7/3/2004 - MAP - use cjkcodecs and iconv_codec if available; always
+# convert feed to UTF-8 before passing to XML parser; completely revamped
+# logic for determining character encoding and attempting XML parsing
+# (much faster); increased default timeout to 20 seconds; test for presence
+# of Location header on redirects; added tests for many alternate character
+# encodings; support various EBCDIC encodings; support UTF-16BE and
+# UTF-16LE with or without a BOM; support UTF-8 with a BOM; support
+# UTF-32BE and UTF-32LE with or without a BOM; fixed crashing bug if no
+# XML parsers are available; added support for 'Content-encoding: deflate';
+# send blank 'Accept-encoding: ' header if neither gzip nor zlib modules
+# are available
+#3.3 - 7/15/2004 - MAP - optimize EBCDIC to ASCII conversion; fix obscure
+# problem tracking xml:base and xml:lang if element declares it, child
+# doesn't, first grandchild redeclares it, and second grandchild doesn't;
+# refactored date parsing; defined public registerDateHandler so callers
+# can add support for additional date formats at runtime; added support
+# for OnBlog, Nate, MSSQL, Greek, and Hungarian dates (ytrewq1); added
+# zopeCompatibilityHack() which turns FeedParserDict into a regular
+# dictionary, required for Zope compatibility, and also makes command-
+# line debugging easier because pprint module formats real dictionaries
+# better than dictionary-like objects; added NonXMLContentType exception,
+# which is stored in bozo_exception when a feed is served with a non-XML
+# media type such as 'text/plain'; respect Content-Language as default
+# language if no xml:lang is present; cloud dict is now FeedParserDict;
+# generator dict is now FeedParserDict; better tracking of xml:lang,
+# including support for xml:lang='' to unset the current language;
+# recognize RSS 1.0 feeds even when RSS 1.0 namespace is not the default
+# namespace; don't overwrite final status on redirects (scenarios:
+# redirecting to a URL that returns 304, redirecting to a URL that
+# redirects to another URL with a different type of redirect); add
+# support for HTTP 303 redirects
+#4.0 - MAP - support for relative URIs in xml:base attribute; fixed
+# encoding issue with mxTidy (phopkins); preliminary support for RFC 3229;
+# support for Atom 1.0; support for iTunes extensions; new 'tags' for
+# categories/keywords/etc.
as array of dict +# {'term': term, 'scheme': scheme, 'label': label} to match Atom 1.0 +# terminology; parse RFC 822-style dates with no time; lots of other +# bug fixes +#4.1 - MAP - removed socket timeout; added support for chardet library diff --git a/DJAGEN/branches/oguz/djagen/gezegen/planet/htmltmpl.py b/DJAGEN/branches/oguz/djagen/gezegen/planet/htmltmpl.py new file mode 100755 index 0000000..be6e41b --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/gezegen/planet/htmltmpl.py @@ -0,0 +1,1480 @@ + +""" A templating engine for separation of code and HTML. + + The documentation of this templating engine is separated to two parts: + + 1. Description of the templating language. + + 2. Documentation of classes and API of this module that provides + a Python implementation of the templating language. + + All the documentation can be found in 'doc' directory of the + distribution tarball or at the homepage of the engine. + Latest versions of this module are also available at that website. + + You can use and redistribute this module under conditions of the + GNU General Public License that can be found either at + [ http://www.gnu.org/ ] or in file "LICENSE" contained in the + distribution tarball of this module. + + Copyright (c) 2001 Tomas Styblo, tripie@cpan.org + + @name htmltmpl + @version 1.22 + @author-name Tomas Styblo + @author-email tripie@cpan.org + @website http://htmltmpl.sourceforge.net/ + @license-name GNU GPL + @license-url http://www.gnu.org/licenses/gpl.html +""" + +__version__ = 1.22 +__author__ = "Tomas Styblo (tripie@cpan.org)" + +# All imported modules are part of the standard Python library. + +from types import * +import re +import os +import os.path +import pprint # only for debugging +import sys +import copy +import cgi # for HTML escaping of variables +import urllib # for URL escaping of variables +import cPickle # for template compilation +import gettext + +INCLUDE_DIR = "inc" + +# Total number of possible parameters. +# Increment if adding a parameter to any statement. +PARAMS_NUMBER = 3 + +# Relative positions of parameters in TemplateCompiler.tokenize(). +PARAM_NAME = 1 +PARAM_ESCAPE = 2 +PARAM_GLOBAL = 3 +PARAM_GETTEXT_STRING = 1 + +# Find a way to lock files. Currently implemented only for UNIX and windows. +LOCKTYPE_FCNTL = 1 +LOCKTYPE_MSVCRT = 2 +LOCKTYPE = None +try: + import fcntl +except: + try: + import msvcrt + except: + LOCKTYPE = None + else: + LOCKTYPE = LOCKTYPE_MSVCRT +else: + LOCKTYPE = LOCKTYPE_FCNTL +LOCK_EX = 1 +LOCK_SH = 2 +LOCK_UN = 3 + +############################################## +# CLASS: TemplateManager # +############################################## + +class TemplateManager: + """ Class that manages compilation and precompilation of templates. + + You should use this class whenever you work with templates + that are stored in a file. The class can create a compiled + template and transparently manage its precompilation. It also + keeps the precompiled templates up-to-date by modification times + comparisons. + """ + + def __init__(self, include=1, max_include=5, precompile=1, comments=1, + gettext=0, debug=0): + """ Constructor. + + @header + __init__(include=1, max_include=5, precompile=1, comments=1, + gettext=0, debug=0) + + @param include Enable or disable included templates. + This optional parameter can be used to enable or disable + TMPL_INCLUDE inclusion of templates. Disabling of + inclusion can improve performance a bit. The inclusion is + enabled by default. + + @param max_include Maximum depth of nested inclusions. 
+ This optional parameter can be used to specify maximum depth of + nested TMPL_INCLUDE inclusions. It defaults to 5. + This setting prevents infinite recursive inclusions. + + @param precompile Enable or disable precompilation of templates. + This optional parameter can be used to enable or disable + creation and usage of precompiled templates. + + A precompiled template is saved to the same directory in + which the main template file is located. You need write + permissions to that directory. + + Precompilation provides a significant performance boost because + it's not necessary to parse the templates over and over again. + The boost is especially noticeable when templates that include + other templates are used. + + Comparison of modification times of the main template and all + included templates is used to ensure that the precompiled + templates are up-to-date. Templates are also recompiled if the + htmltmpl module is updated. + + The TemplateErrorexception is raised when the precompiled + template cannot be saved. Precompilation is enabled by default. + + Precompilation is available only on UNIX and Windows platforms, + because proper file locking which is necessary to ensure + multitask safe behaviour is platform specific and is not + implemented for other platforms. Attempts to enable precompilation + on the other platforms result in raise of the + TemplateError exception. + + @param comments Enable or disable template comments. + This optional parameter can be used to enable or disable + template comments. + Disabling of the comments can improve performance a bit. + Comments are enabled by default. + + @param gettext Enable or disable gettext support. + + @param debug Enable or disable debugging messages. + This optional parameter is a flag that can be used to enable + or disable debugging messages which are printed to the standard + error output. The debugging messages are disabled by default. + """ + # Save the optional parameters. + # These values are not modified by any method. + self._include = include + self._max_include = max_include + self._precompile = precompile + self._comments = comments + self._gettext = gettext + self._debug = debug + + # Find what module to use to lock files. + # File locking is necessary for the 'precompile' feature to be + # multitask/thread safe. Currently it works only on UNIX + # and Windows. Anyone willing to implement it on Mac ? + if precompile and not LOCKTYPE: + raise TemplateError, "Template precompilation is not "\ + "available on this platform." + self.DEB("INIT DONE") + + def prepare(self, file): + """ Preprocess, parse, tokenize and compile the template. + + If precompilation is enabled then this method tries to load + a precompiled form of the template from the same directory + in which the template source file is located. If it succeeds, + then it compares modification times stored in the precompiled + form to modification times of source files of the template, + including source files of all templates included via the + TMPL_INCLUDE statements. If any of the modification times + differs, then the template is recompiled and the precompiled + form updated. + + If precompilation is disabled, then this method parses and + compiles the template. + + @header prepare(file) + + @return Compiled template. + The methods returns an instance of the Template class + which is a compiled form of the template. This instance can be + used as input for the TemplateProcessor. + + @param file Path to the template file to prepare. 
+ The method looks for the template file in current directory + if the parameter is a relative path. All included templates must + be placed in subdirectory 'inc' of the + directory in which the main template file is located. + """ + compiled = None + if self._precompile: + if self.is_precompiled(file): + try: + precompiled = self.load_precompiled(file) + except PrecompiledError, template: + print >> sys.stderr, "Htmltmpl: bad precompiled "\ + "template '%s' removed" % template + compiled = self.compile(file) + self.save_precompiled(compiled) + else: + precompiled.debug(self._debug) + compile_params = (self._include, self._max_include, + self._comments, self._gettext) + if precompiled.is_uptodate(compile_params): + self.DEB("PRECOMPILED: UPTODATE") + compiled = precompiled + else: + self.DEB("PRECOMPILED: NOT UPTODATE") + compiled = self.update(precompiled) + else: + self.DEB("PRECOMPILED: NOT PRECOMPILED") + compiled = self.compile(file) + self.save_precompiled(compiled) + else: + self.DEB("PRECOMPILATION DISABLED") + compiled = self.compile(file) + return compiled + + def update(self, template): + """ Update (recompile) a compiled template. + + This method recompiles a template compiled from a file. + If precompilation is enabled then the precompiled form saved on + disk is also updated. + + @header update(template) + + @return Recompiled template. + It's ensured that the returned template is up-to-date. + + @param template A compiled template. + This parameter should be an instance of the Template + class, created either by the TemplateManager or by the + TemplateCompiler. The instance must represent a template + compiled from a file on disk. + """ + self.DEB("UPDATE") + updated = self.compile(template.file()) + if self._precompile: + self.save_precompiled(updated) + return updated + + ############################################## + # PRIVATE METHODS # + ############################################## + + def DEB(self, str): + """ Print debugging message to stderr if debugging is enabled. + @hidden + """ + if self._debug: print >> sys.stderr, str + + def lock_file(self, file, lock): + """ Provide platform independent file locking. + @hidden + """ + fd = file.fileno() + if LOCKTYPE == LOCKTYPE_FCNTL: + if lock == LOCK_SH: + fcntl.flock(fd, fcntl.LOCK_SH) + elif lock == LOCK_EX: + fcntl.flock(fd, fcntl.LOCK_EX) + elif lock == LOCK_UN: + fcntl.flock(fd, fcntl.LOCK_UN) + else: + raise TemplateError, "BUG: bad lock in lock_file" + elif LOCKTYPE == LOCKTYPE_MSVCRT: + if lock == LOCK_SH: + # msvcrt does not support shared locks :-( + msvcrt.locking(fd, msvcrt.LK_LOCK, 1) + elif lock == LOCK_EX: + msvcrt.locking(fd, msvcrt.LK_LOCK, 1) + elif lock == LOCK_UN: + msvcrt.locking(fd, msvcrt.LK_UNLCK, 1) + else: + raise TemplateError, "BUG: bad lock in lock_file" + else: + raise TemplateError, "BUG: bad locktype in lock_file" + + def compile(self, file): + """ Compile the template. + @hidden + """ + return TemplateCompiler(self._include, self._max_include, + self._comments, self._gettext, + self._debug).compile(file) + + def is_precompiled(self, file): + """ Return true if the template is already precompiled on the disk. + This method doesn't check whether the compiled template is + uptodate. + @hidden + """ + filename = file + "c" # "template.tmplc" + if os.path.isfile(filename): + return 1 + else: + return 0 + + def load_precompiled(self, file): + """ Load precompiled template from disk. + + Remove the precompiled template file and recompile it + if the file contains corrupted or unpicklable data. 
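+
+            (Added note: the precompiled form of a template is stored next
+            to it under the same name with a trailing "c", so e.g. a
+            hypothetical "page.tmpl" is cached as "page.tmplc".)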
+ + @hidden + """ + filename = file + "c" # "template.tmplc" + self.DEB("LOADING PRECOMPILED") + try: + remove_bad = 0 + file = None + try: + file = open(filename, "rb") + self.lock_file(file, LOCK_SH) + precompiled = cPickle.load(file) + except IOError, (errno, errstr): + raise TemplateError, "IO error in load precompiled "\ + "template '%s': (%d) %s"\ + % (filename, errno, errstr) + except cPickle.UnpicklingError: + remove_bad = 1 + raise PrecompiledError, filename + except: + remove_bad = 1 + raise + else: + return precompiled + finally: + if file: + self.lock_file(file, LOCK_UN) + file.close() + if remove_bad and os.path.isfile(filename): + # X: We may lose the original exception here, raising OSError. + os.remove(filename) + + def save_precompiled(self, template): + """ Save compiled template to disk in precompiled form. + + Associated metadata is also saved. It includes: filename of the + main template file, modification time of the main template file, + modification times of all included templates and version of the + htmltmpl module which compiled the template. + + The method removes a file which is saved only partially because + of some error. + + @hidden + """ + filename = template.file() + "c" # creates "template.tmplc" + # Check if we have write permission to the template's directory. + template_dir = os.path.dirname(os.path.abspath(filename)) + if not os.access(template_dir, os.W_OK): + raise TemplateError, "Cannot save precompiled templates "\ + "to '%s': write permission denied."\ + % template_dir + try: + remove_bad = 0 + file = None + try: + file = open(filename, "wb") # may truncate existing file + self.lock_file(file, LOCK_EX) + BINARY = 1 + READABLE = 0 + if self._debug: + cPickle.dump(template, file, READABLE) + else: + cPickle.dump(template, file, BINARY) + except IOError, (errno, errstr): + remove_bad = 1 + raise TemplateError, "IO error while saving precompiled "\ + "template '%s': (%d) %s"\ + % (filename, errno, errstr) + except cPickle.PicklingError, error: + remove_bad = 1 + raise TemplateError, "Pickling error while saving "\ + "precompiled template '%s': %s"\ + % (filename, error) + except: + remove_bad = 1 + raise + else: + self.DEB("SAVING PRECOMPILED") + finally: + if file: + self.lock_file(file, LOCK_UN) + file.close() + if remove_bad and os.path.isfile(filename): + # X: We may lose the original exception here, raising OSError. + os.remove(filename) + + +############################################## +# CLASS: TemplateProcessor # +############################################## + +class TemplateProcessor: + """ Fill the template with data and process it. + + This class provides actual processing of a compiled template. + Use it to set template variables and loops and then obtain + result of the processing. + """ + + def __init__(self, html_escape=1, magic_vars=1, global_vars=0, debug=0): + """ Constructor. + + @header __init__(html_escape=1, magic_vars=1, global_vars=0, + debug=0) + + @param html_escape Enable or disable HTML escaping of variables. + This optional parameter is a flag that can be used to enable or + disable automatic HTML escaping of variables. + All variables are by default automatically HTML escaped. + The escaping process substitutes HTML brackets, ampersands and + double quotes with appropriate HTML entities. + + @param magic_vars Enable or disable loop magic variables. + This parameter can be used to enable or disable + "magic" context variables, that are automatically defined inside + loops. Magic variables are enabled by default. 
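+
+            For example (added illustration; the loop name 'Rows' and the
+            variable 'name' are hypothetical), __FIRST__ is true only in
+            the first pass of a loop, so a template can emit a label
+            exactly once:
+
+                <TMPL_LOOP Rows>
+                    <TMPL_IF __FIRST__>Names: <TMPL_ELSE>, </TMPL_IF>
+                    <TMPL_VAR name>
+                </TMPL_LOOP>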
+ + Refer to the language specification for description of these + magic variables. + + @param global_vars Globally activate global lookup of variables. + This optional parameter is a flag that can be used to specify + whether variables which cannot be found in the current scope + should be automatically looked up in enclosing scopes. + + Automatic global lookup is disabled by default. Global lookup + can be overriden on a per-variable basis by the + GLOBAL parameter of a TMPL_VAR + statement. + + @param debug Enable or disable debugging messages. + """ + self._html_escape = html_escape + self._magic_vars = magic_vars + self._global_vars = global_vars + self._debug = debug + + # Data structure containing variables and loops set by the + # application. Use debug=1, process some template and + # then check stderr to see how the structure looks. + # It's modified only by set() and reset() methods. + self._vars = {} + + # Following variables are for multipart templates. + self._current_part = 1 + self._current_pos = 0 + + def set(self, var, value): + """ Associate a value with top-level template variable or loop. + + A template identifier can represent either an ordinary variable + (string) or a loop. + + To assign a value to a string identifier pass a scalar + as the 'value' parameter. This scalar will be automatically + converted to string. + + To assign a value to a loop identifier pass a list of mappings as + the 'value' parameter. The engine iterates over this list and + assigns values from the mappings to variables in a template loop + block if a key in the mapping corresponds to a name of a variable + in the loop block. The number of mappings contained in this list + is equal to number of times the loop block is repeated in the + output. + + @header set(var, value) + @return No return value. + + @param var Name of template variable or loop. + @param value The value to associate. + + """ + # The correctness of character case is verified only for top-level + # variables. + if self.is_ordinary_var(value): + # template top-level ordinary variable + if not var.islower(): + raise TemplateError, "Invalid variable name '%s'." % var + elif type(value) == ListType: + # template top-level loop + if var != var.capitalize(): + raise TemplateError, "Invalid loop name '%s'." % var + else: + raise TemplateError, "Value of toplevel variable '%s' must "\ + "be either a scalar or a list." % var + self._vars[var] = value + self.DEB("VALUE SET: " + str(var)) + + def reset(self, keep_data=0): + """ Reset the template data. + + This method resets the data contained in the template processor + instance. The template processor instance can be used to process + any number of templates, but this method must be called after + a template is processed to reuse the instance, + + @header reset(keep_data=0) + @return No return value. + + @param keep_data Do not reset the template data. + Use this flag if you do not want the template data to be erased. + This way you can reuse the data contained in the instance of + the TemplateProcessor. + """ + self._current_part = 1 + self._current_pos = 0 + if not keep_data: + self._vars.clear() + self.DEB("RESET") + + def process(self, template, part=None): + """ Process a compiled template. Return the result as string. + + This method actually processes a template and returns + the result. + + @header process(template, part=None) + @return Result of the processing as string. + + @param template A compiled template. 
Value of this parameter must be an instance of the
+            Template class created either by the
+            TemplateManager or by the TemplateCompiler.
+
+            @param part The part of a multipart template to process.
+            This parameter can be used only together with a multipart
+            template. It specifies the number of the part to process.
+            It must be greater than zero, because the parts are numbered
+            from one.
+
+            The parts must be processed in the right order. You
+            cannot process a part which precedes an already processed part.
+
+            If this parameter is not specified, then the whole template
+            is processed, or all remaining parts are processed.
+        """
+        self.DEB("APP INPUT:")
+        if self._debug: pprint.pprint(self._vars, sys.stderr)
+        if part != None and (part == 0 or part < self._current_part):
+            raise TemplateError, "process() - invalid part number"
+
+        # This flag means "jump behind the end of current statement" or
+        # "skip the parameters of current statement".
+        # Even parameters that actually are not present in the template
+        # do appear in the list of tokens as empty items !
+        skip_params = 0
+
+        # Stack for enabling or disabling output in response to TMPL_IF,
+        # TMPL_UNLESS, TMPL_ELSE and TMPL_LOOPs with no passes.
+        output_control = []
+        ENABLE_OUTPUT = 1
+        DISABLE_OUTPUT = 0
+
+        # Stacks for data related to loops.
+        loop_name = []        # name of a loop
+        loop_pass = []        # current pass of a loop (counted from zero)
+        loop_start = []       # index of loop start in token list
+        loop_total = []       # total number of passes in a loop
+
+        tokens = template.tokens()
+        len_tokens = len(tokens)
+        out = ""              # buffer for processed output
+
+        # Recover position at which we ended after processing of last part.
+        i = self._current_pos
+
+        # Process the list of tokens.
+        while 1:
+            if i == len_tokens: break
+            if skip_params:
+                # Skip the parameters following a statement.
+                skip_params = 0
+                i += PARAMS_NUMBER
+                continue
+
+            token = tokens[i]
+            if token.startswith("<TMPL_") or token.startswith("</TMPL_"):
+                if token == "<TMPL_VAR":
+                    # TMPL_VARs should be first. They are the most common.
+                    var = tokens[i + PARAM_NAME]
+                    if not var:
+                        raise TemplateError, "No identifier in <TMPL_VAR>."
+                    escape = tokens[i + PARAM_ESCAPE]
+                    globalp = tokens[i + PARAM_GLOBAL]
+                    skip_params = 1
+
+                    # If output of current block is not disabled then append
+                    # the substituted and escaped variable to the output.
+                    if DISABLE_OUTPUT not in output_control:
+                        value = str(self.find_value(var, loop_name, loop_pass,
+                                                    loop_total, globalp))
+                        out += self.escape(value, escape)
+                        self.DEB("VAR: " + str(var))
+
+                elif token == "<TMPL_LOOP":
+                    var = tokens[i + PARAM_NAME]
+                    if not var:
+                        raise TemplateError, "No identifier in <TMPL_LOOP>."
+                    skip_params = 1
+
+                    # Find total number of passes in this loop.
+                    passtotal = self.find_value(var, loop_name, loop_pass,
+                                                loop_total)
+                    if not passtotal: passtotal = 0
+                    # Push data for this loop on the stack.
+                    loop_total.append(passtotal)
+                    loop_start.append(i)
+                    loop_pass.append(0)
+                    loop_name.append(var)
+
+                    # Disable output of loop block if the number of passes
+                    # in this loop is zero.
+                    if passtotal == 0:
+                        # This loop is empty.
+                        output_control.append(DISABLE_OUTPUT)
+                        self.DEB("LOOP: DISABLE: " + str(var))
+                    else:
+                        output_control.append(ENABLE_OUTPUT)
+                        self.DEB("LOOP: FIRST PASS: %s TOTAL: %d"\
+                                 % (var, passtotal))
+
+                elif token == "<TMPL_IF":
+                    var = tokens[i + PARAM_NAME]
+                    if not var:
+                        raise TemplateError, "No identifier in <TMPL_IF>."
+                    globalp = tokens[i + PARAM_GLOBAL]
+                    skip_params = 1
+                    if self.find_value(var, loop_name, loop_pass,
+                                       loop_total, globalp):
+                        output_control.append(ENABLE_OUTPUT)
+                        self.DEB("IF: ENABLE: " + str(var))
+                    else:
+                        output_control.append(DISABLE_OUTPUT)
+                        self.DEB("IF: DISABLE: " + str(var))
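+
+                # Illustrative note (added commentary, not in the original
+                # source): output_control is a stack holding one
+                # ENABLE_OUTPUT/DISABLE_OUTPUT flag per open TMPL_IF,
+                # TMPL_UNLESS or TMPL_LOOP.  Output is produced only while
+                # no DISABLE_OUTPUT is anywhere on the stack, so in
+                # <TMPL_IF a> ... <TMPL_IF b> a false 'b' silences the
+                # inner block even when 'a' is true.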
+                elif token == "<TMPL_UNLESS":
+                    var = tokens[i + PARAM_NAME]
+                    if not var:
+                        raise TemplateError, "No identifier in <TMPL_UNLESS>."
+                    globalp = tokens[i + PARAM_GLOBAL]
+                    skip_params = 1
+                    if self.find_value(var, loop_name, loop_pass,
+                                       loop_total, globalp):
+                        output_control.append(DISABLE_OUTPUT)
+                        self.DEB("UNLESS: DISABLE: " + str(var))
+                    else:
+                        output_control.append(ENABLE_OUTPUT)
+                        self.DEB("UNLESS: ENABLE: " + str(var))
+
+                elif token == "</TMPL_LOOP":
+                    skip_params = 1
+
+                    # If this loop was not disabled, then record the pass.
+                    if loop_total[-1] > 0: loop_pass[-1] += 1
+
+                    if loop_pass[-1] == loop_total[-1]:
+                        # There are no more passes in this loop. Pop
+                        # the loop from stack.
+                        loop_pass.pop()
+                        loop_name.pop()
+                        loop_start.pop()
+                        loop_total.pop()
+                        output_control.pop()
+                        self.DEB("LOOP: END")
+                    else:
+                        # Jump to the beginning of this loop block
+                        # to process next pass of the loop.
+                        i = loop_start[-1]
+                        self.DEB("LOOP: NEXT PASS")
+
+                elif token == "</TMPL_IF":
+                    skip_params = 1
+                    output_control.pop()
+                    self.DEB("IF: END")
+
+                elif token == "</TMPL_UNLESS":
+                    skip_params = 1
+                    output_control.pop()
+                    self.DEB("UNLESS: END")
+
+                elif token == "<TMPL_ELSE":
+                    skip_params = 1
+                    if not output_control:
+                        raise TemplateError, "Misplaced <TMPL_ELSE>."
+                    if output_control[-1] == DISABLE_OUTPUT:
+                        # Condition was false, activate the ELSE block.
+                        output_control[-1] = ENABLE_OUTPUT
+                        self.DEB("ELSE: ENABLE")
+                    elif output_control[-1] == ENABLE_OUTPUT:
+                        # Condition was true, deactivate the ELSE block.
+                        output_control[-1] = DISABLE_OUTPUT
+                        self.DEB("ELSE: DISABLE")
+                    else:
+                        raise TemplateError, "BUG: ELSE: INVALID FLAG"
+
+                elif token == "<TMPL_INCLUDE":
+                    # TMPL_INCLUDE is left in the compiled template only
+                    # when it was not replaced by the included template.
+                    skip_params = 1
+                    filename = tokens[i + PARAM_NAME]
+                    out += """
+                        <br />
+                        <p>
+                        <strong>HTMLTMPL WARNING:</strong><br />
+                        Cannot include template: <strong>%s</strong>
+                        </p>
+                        <br />
+                    """ % filename
+                    self.DEB("CANNOT INCLUDE WARNING")
+                elif token == "<TMPL_GETTEXT":
+                    skip_params = 1
+                    if DISABLE_OUTPUT not in output_control:
+                        text = tokens[i + PARAM_GETTEXT_STRING]
+                        out += gettext.gettext(text)
+                        self.DEB("GETTEXT: " + text)
+
+                else:
+                    # Unknown processing statement.
+                    raise TemplateError, "Invalid statement <%s>." % token
+
+            elif DISABLE_OUTPUT not in output_control:
+                # Raw textual template data.
+                # If output of current block is not disabled, then
+                # append template data to the output buffer.
+                out += token
+
+            i += 1
+            # end of the big while loop
+
+        # Check whether all opening statements were closed.
+        if loop_name: raise TemplateError, "Missing </TMPL_LOOP>."
+        if output_control: raise TemplateError, "Missing </TMPL_IF> or </TMPL_UNLESS>."
+        return out
+
+    ##############################################
+    #              PRIVATE METHODS               #
+    ##############################################
+
+    def DEB(self, str):
+        """ Print debugging message to stderr if debugging is enabled.
+            @hidden
+        """
+        if self._debug: print >> sys.stderr, str
+
+    def find_value(self, var, loop_name, loop_pass, loop_total,
+                   global_override=None):
+        """ Search the self._vars data structure to find variable var
+            located in currently processed pass of a loop which
+            is currently being processed. If the variable is an ordinary
+            variable, then return it.
+
+            If the variable is an identifier of a loop, then
+            return the total number of times this loop will
+            be executed.
+
+            Return an empty string, if the variable is not
+            found at all.
+
+            @hidden
+        """
+        # Search for the requested variable in magic vars if the name
+        # of the variable starts with "__" and if we are inside a loop.
+        if self._magic_vars and var.startswith("__") and loop_name:
+            return self.magic_var(var, loop_pass[-1], loop_total[-1])
+
+        # Search for an ordinary variable or for a loop.
+        # Recursively search in self._vars for the requested variable.
+        scope = self._vars
+        globals = []
+        for i in range(len(loop_name)):
+            # If global lookup is on then push the value on the stack.
+            if ((self._global_vars and global_override != "0") or \
+                 global_override == "1") and scope.has_key(var) and \
+               self.is_ordinary_var(scope[var]):
+                globals.append(scope[var])
+
+            # Descent deeper into the hierarchy.
+            if scope.has_key(loop_name[i]) and scope[loop_name[i]]:
+                scope = scope[loop_name[i]][loop_pass[i]]
+            else:
+                return ""
+
+        if scope.has_key(var):
+            # Value exists in current loop.
+            if type(scope[var]) == ListType:
+                # The requested value is a loop.
+                # Return total number of its passes.
+                return len(scope[var])
+            else:
+                return scope[var]
+        elif globals and \
+             ((self._global_vars and global_override != "0") or \
+              global_override == "1"):
+            # Return globally looked up value.
+            return globals.pop()
+        else:
+            # No value found.
+            if var[0].isupper():
+                # This is a loop name.
+                # Return zero, because the user wants to know number
+                # of its passes.
+                return 0
+            else:
+                return ""
+
+    def magic_var(self, var, loop_pass, loop_total):
+        """ Resolve and return value of a magic variable.
+            Raise an exception if the magic variable is not recognized.
+
+            @hidden
+        """
+        self.DEB("MAGIC: '%s', PASS: %d, TOTAL: %d"\
+                 % (var, loop_pass, loop_total))
+        if var == "__FIRST__":
+            if loop_pass == 0:
+                return 1
+            else:
+                return 0
+        elif var == "__LAST__":
+            if loop_pass == loop_total - 1:
+                return 1
+            else:
+                return 0
+        elif var == "__INNER__":
+            # If this is neither the first nor the last pass.
+            if loop_pass != 0 and loop_pass != loop_total - 1:
+                return 1
+            else:
+                return 0
+        elif var == "__PASS__":
+            # Magic variable __PASS__ counts passes from one.
+            return loop_pass + 1
+        elif var == "__PASSTOTAL__":
+            return loop_total
+        elif var == "__ODD__":
+            # Internally pass numbers stored in loop_pass are counted from
+            # zero.
But the template language presents them counted from one. + # Therefore we must add one to the actual loop_pass value to get + # the value we present to the user. + if (loop_pass + 1) % 2 != 0: + return 1 + else: + return 0 + elif var.startswith("__EVERY__"): + # Magic variable __EVERY__x is never true in first or last pass. + if loop_pass != 0 and loop_pass != loop_total - 1: + # Check if an integer follows the variable name. + try: + every = int(var[9:]) # nine is length of "__EVERY__" + except ValueError: + raise TemplateError, "Magic variable __EVERY__x: "\ + "Invalid pass number." + else: + if not every: + raise TemplateError, "Magic variable __EVERY__x: "\ + "Pass number cannot be zero." + elif (loop_pass + 1) % every == 0: + self.DEB("MAGIC: EVERY: " + str(every)) + return 1 + else: + return 0 + else: + return 0 + else: + raise TemplateError, "Invalid magic variable '%s'." % var + + def escape(self, str, override=""): + """ Escape a string either by HTML escaping or by URL escaping. + @hidden + """ + ESCAPE_QUOTES = 1 + if (self._html_escape and override != "NONE" and override != "0" and \ + override != "URL") or override == "HTML" or override == "1": + return cgi.escape(str, ESCAPE_QUOTES) + elif override == "URL": + return urllib.quote_plus(str) + else: + return str + + def is_ordinary_var(self, var): + """ Return true if var is a scalar. (not a reference to loop) + @hidden + """ + if type(var) == StringType or type(var) == IntType or \ + type(var) == LongType or type(var) == FloatType: + return 1 + else: + return 0 + + +############################################## +# CLASS: TemplateCompiler # +############################################## + +class TemplateCompiler: + """ Preprocess, parse, tokenize and compile the template. + + This class parses the template and produces a 'compiled' form + of it. This compiled form is an instance of the Template + class. The compiled form is used as input for the TemplateProcessor + which uses it to actually process the template. + + This class should be used direcly only when you need to compile + a template from a string. If your template is in a file, then you + should use the TemplateManager class which provides + a higher level interface to this class and also can save the + compiled template to disk in a precompiled form. + """ + + def __init__(self, include=1, max_include=5, comments=1, gettext=0, + debug=0): + """ Constructor. + + @header __init__(include=1, max_include=5, comments=1, gettext=0, + debug=0) + + @param include Enable or disable included templates. + @param max_include Maximum depth of nested inclusions. + @param comments Enable or disable template comments. + @param gettext Enable or disable gettext support. + @param debug Enable or disable debugging messages. + """ + + self._include = include + self._max_include = max_include + self._comments = comments + self._gettext = gettext + self._debug = debug + + # This is a list of filenames of all included templates. + # It's modified by the include_templates() method. + self._include_files = [] + + # This is a counter of current inclusion depth. It's used to prevent + # infinite recursive includes. + self._include_level = 0 + + def compile(self, file): + """ Compile template from a file. + + @header compile(file) + @return Compiled template. + The return value is an instance of the Template + class. + + @param file Filename of the template. + See the prepare() method of the TemplateManager + class for exaplanation of this parameter. 
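+
+            A minimal usage sketch (added illustration; the file name
+            'page.tmpl' and the variable 'title' are hypothetical):
+
+                compiler = TemplateCompiler()
+                template = compiler.compile("page.tmpl")
+                tproc = TemplateProcessor()
+                tproc.set("title", "Hello")
+                print tproc.process(template)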
+ """ + + self.DEB("COMPILING FROM FILE: " + file) + self._include_path = os.path.join(os.path.dirname(file), INCLUDE_DIR) + tokens = self.parse(self.read(file)) + compile_params = (self._include, self._max_include, self._comments, + self._gettext) + return Template(__version__, file, self._include_files, + tokens, compile_params, self._debug) + + def compile_string(self, data): + """ Compile template from a string. + + This method compiles a template from a string. The + template cannot include any templates. + TMPL_INCLUDE statements are turned into warnings. + + @header compile_string(data) + @return Compiled template. + The return value is an instance of the Template + class. + + @param data String containing the template data. + """ + self.DEB("COMPILING FROM STRING") + self._include = 0 + tokens = self.parse(data) + compile_params = (self._include, self._max_include, self._comments, + self._gettext) + return Template(__version__, None, None, tokens, compile_params, + self._debug) + + ############################################## + # PRIVATE METHODS # + ############################################## + + def DEB(self, str): + """ Print debugging message to stderr if debugging is enabled. + @hidden + """ + if self._debug: print >> sys.stderr, str + + def read(self, filename): + """ Read content of file and return it. Raise an error if a problem + occurs. + @hidden + """ + self.DEB("READING: " + filename) + try: + f = None + try: + f = open(filename, "r") + data = f.read() + except IOError, (errno, errstr): + raise TemplateError, "IO error while reading template '%s': "\ + "(%d) %s" % (filename, errno, errstr) + else: + return data + finally: + if f: f.close() + + def parse(self, template_data): + """ Parse the template. This method is recursively called from + within the include_templates() method. + + @return List of processing tokens. + @hidden + """ + if self._comments: + self.DEB("PREPROCESS: COMMENTS") + template_data = self.remove_comments(template_data) + tokens = self.tokenize(template_data) + if self._include: + self.DEB("PREPROCESS: INCLUDES") + self.include_templates(tokens) + return tokens + + def remove_comments(self, template_data): + """ Remove comments from the template data. + @hidden + """ + pattern = r"### .*" + return re.sub(pattern, "", template_data) + + def include_templates(self, tokens): + """ Process TMPL_INCLUDE statements. Use the include_level counter + to prevent infinite recursion. Record paths to all included + templates to self._include_files. + @hidden + """ + i = 0 + out = "" # buffer for output + skip_params = 0 + + # Process the list of tokens. + while 1: + if i == len(tokens): break + if skip_params: + skip_params = 0 + i += PARAMS_NUMBER + continue + + token = tokens[i] + if token == "." + self._include_level += 1 + if self._include_level > self._max_include: + # Do not include the template. + # Protection against infinite recursive includes. + skip_params = 1 + self.DEB("INCLUDE: LIMIT REACHED: " + filename) + else: + # Include the template. + skip_params = 0 + include_file = os.path.join(self._include_path, filename) + self._include_files.append(include_file) + include_data = self.read(include_file) + include_tokens = self.parse(include_data) + + # Append the tokens from the included template to actual + # position in the tokens list, replacing the TMPL_INCLUDE + # token and its parameters. + tokens[i:i+PARAMS_NUMBER+1] = include_tokens + i = i + len(include_tokens) + self.DEB("INCLUDED: " + filename) + continue # Do not increment 'i' below. 
i += 1
+            # end of the main while loop
+
+        if self._include_level > 0: self._include_level -= 1
+        return out
+
+    def tokenize(self, template_data):
+        """ Split the template into tokens separated by template statements.
+            The statements itself and associated parameters are also
+            separately included in the resulting list of tokens.
+            Return list of the tokens.
+
+            @hidden
+        """
+        self.DEB("TOKENIZING TEMPLATE")
+        # NOTE: The TWO double quotes in character class in the regexp below
+        # are there only to prevent confusion of syntax highlighter in Emacs.
+        pattern = r"""
+            (?:^[ \t]+)?               # eat spaces, tabs (opt.)
+            (<
+            (?:!--[ ])?                # comment start + space (opt.)
+            /?TMPL_[A-Z]+              # closing slash / (opt.) + statement
+            [ a-zA-Z0-9""/.=:_\\-]*    # this spans also comments ending (--)
+            >)
+            [%s]?                      # eat trailing newline (opt.)
+        """ % os.linesep
+        rc = re.compile(pattern, re.VERBOSE | re.MULTILINE)
+        split = rc.split(template_data)
+        tokens = []
+        for statement in split:
+            if statement.startswith("<TMPL_") or \
+               statement.startswith("<!--TMPL_") or \
+               statement.startswith("</TMPL_") or \
+               statement.startswith("<!--/TMPL_"):
+                # Processing statement.  Strip the brackets (and optional
+                # comment syntax) and append the statement token followed by
+                # its parameters, so that every statement occupies exactly
+                # PARAMS_NUMBER + 1 consecutive slots in the token list.
+                if statement.startswith("<!--"):
+                    statement = statement[4:-3]
+                else:
+                    statement = statement[1:-1]
+                params = re.split(r"\s+", statement.strip())
+                tokens.append("<" + params[0])
+                del params[0]
+                tokens.append(self.find_name(params))
+                tokens.append(self.find_param("ESCAPE", params))
+                tokens.append(self.find_param("GLOBAL", params))
+            else:
+                # Ordinary template data.
+                tokens.append(statement)
+        return tokens
+
+    def find_name(self, params):
+        """ Search list of parameters for an identifier of the statement.
+            @hidden
+        """
+        if len(params) > 0 and '=' not in params[0]:
+            # implicit identifier
+            name = params[0]
+            del params[0]
+        else:
+            # explicit identifier as a 'NAME' parameter
+            name = self.find_param("NAME", params)
+        self.DEB("TOKENIZER: NAME: " + str(name))
+        return name
+
+    def find_param(self, param, params):
+        """ Extract value of parameter from a statement.
+            @hidden
+        """
+        for pair in params:
+            name, value = pair.split("=")
+            if not name or not value:
+                raise TemplateError, "Syntax error in template."
+            if name == param:
+                if value[0] == '"':
+                    # The value is in double quotes.
+                    ret_value = value[1:-1]
+                else:
+                    # The value is without double quotes.
+                    ret_value = value
+                self.DEB("TOKENIZER: PARAM: '%s' => '%s'" % (param, ret_value))
+                return ret_value
+        else:
+            self.DEB("TOKENIZER: PARAM: '%s' => NOT DEFINED" % param)
+            return None
+
+
+##############################################
+#              CLASS: Template               #
+##############################################
+
+class Template:
+    """ This class represents a compiled template.
+
+        This class provides storage and methods for the compiled template
+        and associated metadata. It's serialized by pickle if we need to
+        save the compiled template to disk in a precompiled form.
+
+        You should never instantiate this class directly. Always use the
+        TemplateManager or TemplateCompiler classes to
+        create the instances of this class.
+
+        The only method which you can directly use is the is_uptodate
+        method.
+    """
+
+    def __init__(self, version, file, include_files, tokens, compile_params,
+                 debug=0):
+        """ Constructor.
+            @hidden
+        """
+        self._version = version
+        self._file = file
+        self._tokens = tokens
+        self._compile_params = compile_params
+        self._debug = debug
+        self._mtime = None
+        self._include_mtimes = {}
+
+        if not file:
+            self.DEB("TEMPLATE WAS COMPILED FROM A STRING")
+            return
+
+        # Save modification time of the main template file.
+        if os.path.isfile(file):
+            self._mtime = os.path.getmtime(file)
+        else:
+            raise TemplateError, "Template: file does not exist: '%s'" % file
+
+        # Save modification times of all included template files.
+        for inc_file in include_files:
+            if os.path.isfile(inc_file):
+                self._include_mtimes[inc_file] = os.path.getmtime(inc_file)
+            else:
+                raise TemplateError, "Template: file does not exist: '%s'"\
+                                     % inc_file
+
+        self.DEB("NEW TEMPLATE CREATED")
+
+    def is_uptodate(self, compile_params=None):
+        """ Check whether the compiled template is uptodate.
+
+            Return true if this compiled template is uptodate.
+##############################################
+#              CLASS: Template               #
+##############################################
+
+class Template:
+    """ This class represents a compiled template.
+
+        This class provides storage and methods for the compiled template
+        and associated metadata. It's serialized by pickle if we need to
+        save the compiled template to disk in a precompiled form.
+
+        You should never instantiate this class directly. Always use the
+        TemplateManager or TemplateCompiler classes to
+        create the instances of this class.
+
+        The only method which you can directly use is the is_uptodate
+        method.
+    """
+
+    def __init__(self, version, file, include_files, tokens, compile_params,
+                 debug=0):
+        """ Constructor.
+            @hidden
+        """
+        self._version = version
+        self._file = file
+        self._tokens = tokens
+        self._compile_params = compile_params
+        self._debug = debug
+        self._mtime = None
+        self._include_mtimes = {}
+
+        if not file:
+            self.DEB("TEMPLATE WAS COMPILED FROM A STRING")
+            return
+
+        # Save modification time of the main template file.
+        if os.path.isfile(file):
+            self._mtime = os.path.getmtime(file)
+        else:
+            raise TemplateError, "Template: file does not exist: '%s'" % file
+
+        # Save modification times of all included template files.
+        for inc_file in include_files:
+            if os.path.isfile(inc_file):
+                self._include_mtimes[inc_file] = os.path.getmtime(inc_file)
+            else:
+                raise TemplateError, "Template: file does not exist: '%s'"\
+                                     % inc_file
+
+        self.DEB("NEW TEMPLATE CREATED")
+
+    def is_uptodate(self, compile_params=None):
+        """ Check whether the compiled template is uptodate.
+
+            Return true if this compiled template is uptodate.
+            Return false if the template source file was changed on the
+            disk since it was compiled.
+            Works by comparing modification times.
+            Also takes modification times of all included templates
+            into account.
+
+            @header is_uptodate(compile_params=None)
+            @return True if the template is uptodate, false otherwise.
+
+            @param compile_params Only for internal use.
+            Do not use this optional parameter. It's intended only for
+            internal use by the TemplateManager.
+        """
+        if not self._file:
+            self.DEB("TEMPLATE COMPILED FROM A STRING")
+            return 0
+
+        if self._version != __version__:
+            self.DEB("TEMPLATE: VERSION NOT UPTODATE")
+            return 0
+
+        if compile_params != None and compile_params != self._compile_params:
+            self.DEB("TEMPLATE: DIFFERENT COMPILATION PARAMS")
+            return 0
+
+        # Check modification times of the main template and all included
+        # templates. If an included template no longer exists, then
+        # the problem will be resolved when the template is recompiled.
+
+        # Main template file.
+        if not (os.path.isfile(self._file) and \
+                self._mtime == os.path.getmtime(self._file)):
+            self.DEB("TEMPLATE: NOT UPTODATE: " + self._file)
+            return 0
+
+        # Included templates.
+        for inc_file in self._include_mtimes.keys():
+            if not (os.path.isfile(inc_file) and \
+                    self._include_mtimes[inc_file] == \
+                    os.path.getmtime(inc_file)):
+                self.DEB("TEMPLATE: NOT UPTODATE: " + inc_file)
+                return 0
+        else:
+            self.DEB("TEMPLATE: UPTODATE")
+            return 1
+
+    def tokens(self):
+        """ Get tokens of this template.
+            @hidden
+        """
+        return self._tokens
+
+    def file(self):
+        """ Get filename of the main file of this template.
+            @hidden
+        """
+        return self._file
+
+    def debug(self, debug):
+        """ Set debugging state.
+            @hidden
+        """
+        self._debug = debug
+
+    ##############################################
+    #              PRIVATE METHODS               #
+    ##############################################
+
+    def __getstate__(self):
+        """ Used by pickle when the class is serialized.
+            Remove the 'debug' attribute before serialization.
+            @hidden
+        """
+        dict = copy.copy(self.__dict__)
+        del dict["_debug"]
+        return dict
+
+    def __setstate__(self, dict):
+        """ Used by pickle when the class is unserialized.
+            Add the 'debug' attribute.
+            @hidden
+        """
+        dict["_debug"] = 0
+        self.__dict__ = dict
+
+    def DEB(self, str):
+        """ Print debugging message to stderr.
+            @hidden
+        """
+        if self._debug: print >> sys.stderr, str
+
+##############################################
+#                EXCEPTIONS                  #
+##############################################
+
+class TemplateError(Exception):
+    """ Fatal exception. Raised on runtime or template syntax errors.
+
+        This exception is raised when a runtime error occurs or when a syntax
+        error in the template is found. It has one parameter which always
+        is a string containing a description of the error.
+
+        All potential IOError exceptions are handled by the module and are
+        converted to TemplateError exceptions. That means you should catch the
+        TemplateError exception if there is a possibility that for example
+        the template file will not be accessible.
+
+        The exception can be raised by constructors or by any method of any
+        class.
+
+        The instance is no longer usable when this exception is raised.
+    """
+
+    def __init__(self, error):
+        """ Constructor.
+            @hidden
+        """
+        Exception.__init__(self, "Htmltmpl error: " + error)
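Because the module converts IOError into TemplateError (see read() above), callers need only one except clause. A minimal sketch (not part of the patch; import name assumed):

from htmltmpl import TemplateManager, TemplateError

try:
    template = TemplateManager().prepare("missing.tmpl")
except TemplateError, e:
    # raised for template syntax errors AND for unreadable template files
    print "cannot load template:", e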
+class PrecompiledError(Exception):
+    """ This exception is _PRIVATE_ and non fatal.
+        @hidden
+    """
+
+    def __init__(self, template):
+        """ Constructor.
+            @hidden
+        """
+        Exception.__init__(self, template)
+
diff --git a/DJAGEN/branches/oguz/djagen/gezegen/planet/sanitize.py b/DJAGEN/branches/oguz/djagen/gezegen/planet/sanitize.py
new file mode 100755
index 0000000..c98b14d
--- /dev/null
+++ b/DJAGEN/branches/oguz/djagen/gezegen/planet/sanitize.py
@@ -0,0 +1,354 @@
+"""
+sanitize: bringing sanity to the world of messed-up data
+"""
+
+__author__ = ["Mark Pilgrim ",
+              "Aaron Swartz "]
+__contributors__ = ["Sam Ruby "]
+__license__ = "BSD"
+__version__ = "0.25"
+
+_debug = 0
+
+# If you want sanitize to automatically run HTML markup through HTML Tidy, set
+# this to 1. Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html>
+# or utidylib <http://utidylib.berlios.de/>.
+TIDY_MARKUP = 0
+
+# List of Python interfaces for HTML Tidy, in order of preference. Only useful
+# if TIDY_MARKUP = 1
+PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"]
+
+import sgmllib, re, sys
+
+# chardet library auto-detects character encodings
+# Download from http://chardet.feedparser.org/
+try:
+    import chardet
+    if _debug:
+        import chardet.constants
+        chardet.constants._debug = 1
+
+    _chardet = lambda data: chardet.detect(data)['encoding']
+except:
+    chardet = None
+    _chardet = lambda data: None
+
+class _BaseHTMLProcessor(sgmllib.SGMLParser):
+    elements_no_end_tag = ['area', 'base', 'basefont', 'br', 'col', 'frame', 'hr',
+        'img', 'input', 'isindex', 'link', 'meta', 'param']
+
+    _r_barebang = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE)
+    _r_bareamp = re.compile("&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)")
+    _r_shorttag = re.compile(r'<([^<\s]+?)\s*/>')
+
+    def __init__(self, encoding):
+        self.encoding = encoding
+        if _debug: sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding)
+        sgmllib.SGMLParser.__init__(self)
+
+    def reset(self):
+        self.pieces = []
+        sgmllib.SGMLParser.reset(self)
+
+    def _shorttag_replace(self, match):
+        tag = match.group(1)
+        if tag in self.elements_no_end_tag:
+            return '<' + tag + ' />'
+        else:
+            return '<' + tag + '></' + tag + '>'
+
+    def feed(self, data):
+        data = self._r_barebang.sub(r'&lt;!\1', data)
+        data = self._r_bareamp.sub("&amp;", data)
+        data = self._r_shorttag.sub(self._shorttag_replace, data)
+        if self.encoding and type(data) == type(u''):
+            data = data.encode(self.encoding)
+        sgmllib.SGMLParser.feed(self, data)
+
+    def normalize_attrs(self, attrs):
+        # utility method to be called by descendants
+        attrs = [(k.lower(), v) for k, v in attrs]
+        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
+        return attrs
+
+    def unknown_starttag(self, tag, attrs):
+        # called for each start tag
+        # attrs is a list of (attr, value) tuples
+        # e.g. for <pre class="screen">, tag='pre', attrs=[('class', 'screen')]
+        if _debug: sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag)
+        uattrs = []
+        # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
+        for key, value in attrs:
+            if type(value) != type(u''):
+                value = unicode(value, self.encoding)
+            uattrs.append((unicode(key, self.encoding), value))
+        strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs]).encode(self.encoding)
+        if tag in self.elements_no_end_tag:
+            self.pieces.append('<%(tag)s%(strattrs)s />' % locals())
+        else:
+            self.pieces.append('<%(tag)s%(strattrs)s>' % locals())
+
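What the "breathtaking hack" above buys (an editorial sketch, not part of the patch): sgmllib hands attribute values back as byte strings, and joining them naively can corrupt high-bit UTF-8 bytes. Decoding with the feed's declared encoding and re-encoding after the join keeps them intact:

encoding = 'utf-8'
attrs = [('title', 'G\xc3\xbcnl\xc3\xbck')]     # UTF-8 bytes for u'G\u00fcnl\u00fck'

uattrs = []
for key, value in attrs:
    if type(value) != type(u''):
        value = unicode(value, encoding)         # bytes -> unicode
    uattrs.append((unicode(key, encoding), value))

strattrs = u''.join([u' %s="%s"' % (k, v) for k, v in uattrs]).encode(encoding)
print repr(strattrs)                             # ' title="G\xc3\xbcnl\xc3\xbck"'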
+    def unknown_endtag(self, tag):
+        # called for each end tag, e.g. for </pre>, tag will be 'pre'
+        # Reconstruct the original end tag.
+        if tag not in self.elements_no_end_tag:
+            self.pieces.append("</%(tag)s>" % locals())
+
+    def handle_charref(self, ref):
+        # called for each character reference, e.g. for '&#160;', ref will be '160'
+        # Reconstruct the original character reference.
+        self.pieces.append('&#%(ref)s;' % locals())
+
+    def handle_entityref(self, ref):
+        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
+        # Reconstruct the original entity reference.
+        self.pieces.append('&%(ref)s;' % locals())
+
+    def handle_data(self, text):
+        # called for each block of plain text, i.e. outside of any tag and
+        # not containing any character or entity references
+        # Store the original text verbatim.
+        if _debug: sys.stderr.write('_BaseHTMLProcessor, handle_text, text=%s\n' % text)
+        self.pieces.append(text)
+
+    def handle_comment(self, text):
+        # called for each HTML comment, e.g. <!-- insert Python code here -->
+        # Reconstruct the original comment.
+        self.pieces.append('<!--%(text)s-->' % locals())
+
+    def handle_pi(self, text):
+        # called for each processing instruction, e.g. <?instruction>
+        # Reconstruct original processing instruction.
+        self.pieces.append('<?%(text)s>' % locals())
+
+    def handle_decl(self, text):
+        # called for the DOCTYPE, if present, e.g.
+        # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
+        #     "http://www.w3.org/TR/html4/loose.dtd">
+        # Reconstruct original DOCTYPE
+        self.pieces.append('<!%(text)s>' % locals())
+
+    _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
+    def _scan_name(self, i, declstartpos):
+        rawdata = self.rawdata
+        n = len(rawdata)
+        if i == n:
+            return None, -1
+        m = self._new_declname_match(rawdata, i)
+        if m:
+            s = m.group()
+            name = s.strip()
+            if (i + len(s)) == n:
+                return None, -1  # end of buffer
+            return name.lower(), m.end()
+        else:
+            self.handle_data(rawdata)
+#            self.updatepos(declstartpos, i)
+            return None, -1
+
+    def output(self):
+        '''Return processed HTML as a single string'''
+        return ''.join([str(p) for p in self.pieces])
+
+class _HTMLSanitizer(_BaseHTMLProcessor):
+    acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'b', 'big',
+      'blockquote', 'br', 'button', 'caption', 'center', 'cite', 'code', 'col',
+      'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt', 'em', 'fieldset',
+      'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input',
+      'ins', 'kbd', 'label', 'legend', 'li', 'map', 'menu', 'ol', 'optgroup',
+      'option', 'p', 'pre', 'q', 's', 'samp', 'select', 'small', 'span', 'strike',
+      'strong', 'sub', 'sup', 'table', 'textarea', 'tbody', 'td', 'tfoot', 'th',
+      'thead', 'tr', 'tt', 'u', 'ul', 'var']
+
+    acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
+      'action', 'align', 'alt', 'axis', 'border', 'cellpadding', 'cellspacing',
+      'char', 'charoff', 'charset', 'checked', 'cite', 'class', 'clear', 'cols',
+      'colspan', 'color', 'compact', 'coords', 'datetime', 'dir', 'disabled',
+      'enctype', 'for', 'frame', 'headers', 'height', 'href', 'hreflang', 'hspace',
+      'id', 'ismap', 'label', 'lang', 'longdesc', 'maxlength', 'media', 'method',
+      'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'prompt', 'readonly',
+      'rel', 'rev', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size',
+      'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title', 'type',
+      'usemap', 'valign', 'value', 'vspace', 'width']
+
+    ignorable_elements = ['script', 'applet', 'style']
+
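The three lists above drive three different outcomes, mirrored by the test cases in test_sanitize.py further down. A hedged behaviour sketch (not part of the patch; import path assumed):

from planet import sanitize

print sanitize.HTML('<b>bold</b> ok')               # acceptable tag: kept
print sanitize.HTML('<blink>hi</blink>')            # unknown tag: dropped, text kept -> 'hi'
print sanitize.HTML('<script>evil()</script>safe')  # ignorable tag: tag AND content dropped -> 'safe'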
+    def reset(self):
+        _BaseHTMLProcessor.reset(self)
+        self.tag_stack = []
+        self.ignore_level = 0
+
+    def feed(self, data):
+        _BaseHTMLProcessor.feed(self, data)
+        while self.tag_stack:
+            _BaseHTMLProcessor.unknown_endtag(self, self.tag_stack.pop())
+
+    def unknown_starttag(self, tag, attrs):
+        if tag in self.ignorable_elements:
+            self.ignore_level += 1
+            return
+
+        if self.ignore_level:
+            return
+
+        if tag in self.acceptable_elements:
+            attrs = self.normalize_attrs(attrs)
+            attrs = [(key, value) for key, value in attrs if key in self.acceptable_attributes]
+            if tag not in self.elements_no_end_tag:
+                self.tag_stack.append(tag)
+            _BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
+
+    def unknown_endtag(self, tag):
+        if tag in self.ignorable_elements:
+            self.ignore_level -= 1
+            return
+
+        if self.ignore_level:
+            return
+
+        if tag in self.acceptable_elements and tag not in self.elements_no_end_tag:
+            match = False
+            while self.tag_stack:
+                top = self.tag_stack.pop()
+                if top == tag:
+                    match = True
+                    break
+                _BaseHTMLProcessor.unknown_endtag(self, top)
+
+            if match:
+                _BaseHTMLProcessor.unknown_endtag(self, tag)
+
+    def handle_pi(self, text):
+        pass
+
+    def handle_decl(self, text):
+        pass
+
+    def handle_data(self, text):
+        if not self.ignore_level:
+            text = text.replace('<', '')
+        _BaseHTMLProcessor.handle_data(self, text)
+
+def HTML(htmlSource, encoding='utf8'):
+    p = _HTMLSanitizer(encoding)
+    p.feed(htmlSource)
+    data = p.output()
+    if TIDY_MARKUP:
+        # loop through list of preferred Tidy interfaces looking for one that's installed,
+        # then set up a common _tidy function to wrap the interface-specific API.
+        _tidy = None
+        for tidy_interface in PREFERRED_TIDY_INTERFACES:
+            try:
+                if tidy_interface == "uTidy":
+                    from tidy import parseString as _utidy
+                    def _tidy(data, **kwargs):
+                        return str(_utidy(data, **kwargs))
+                    break
+                elif tidy_interface == "mxTidy":
+                    from mx.Tidy import Tidy as _mxtidy
+                    def _tidy(data, **kwargs):
+                        nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs)
+                        return data
+                    break
+            except:
+                pass
+        if _tidy:
+            utf8 = type(data) == type(u'')
+            if utf8:
+                data = data.encode('utf-8')
+            data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8")
+            if utf8:
+                data = unicode(data, 'utf-8')
+            if data.count('<body'):
+                data = data.split('<body', 1)[1]
+                if data.count('>'):
+                    data = data.split('>', 1)[1]
+            if data.count('</body'):
+                data = data.split('</body', 1)[0]
+    data = data.strip().replace('\r\n', '\n')
+    return data
' % self.url)
+
+    def test_changedurl(self):
+        # change the URL directly
+        self.channel.url = self.changed_url
+        self.assertEqual(self.channel.feed_information(),
+            "<%s> (formerly <%s>)" % (self.changed_url, self.url))
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/DJAGEN/branches/oguz/djagen/gezegen/planet/tests/test_main.py b/DJAGEN/branches/oguz/djagen/gezegen/planet/tests/test_main.py
new file mode 100755
index 0000000..c2be62d
--- /dev/null
+++ b/DJAGEN/branches/oguz/djagen/gezegen/planet/tests/test_main.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python
+import os, sys, shutil, errno, unittest
+from ConfigParser import ConfigParser
+from StringIO import StringIO
+import planet
+
+class MainTest(unittest.TestCase):
+
+    def test_minimal(self):
+        configp = ConfigParser()
+        my_planet = planet.Planet(configp)
+        my_planet.run("Planet Name", "http://example.com", [])
+
+    def test_onefeed(self):
+        configp = ConfigParser()
+        configp.readfp(StringIO("""[http://www.example.com/]
+name = Mary
+"""))
+        my_planet = planet.Planet(configp)
+        my_planet.run("Planet Name", "http://example.com", [], True)
+
+
+    def test_generateall(self):
+        configp = ConfigParser()
+        configp.readfp(StringIO("""[http://www.example.com/]
+name = Mary
+"""))
+        my_planet = planet.Planet(configp)
+        my_planet.run("Planet Name", "http://example.com", [], True)
+        basedir = 
os.path.join(os.path.dirname(os.path.abspath(sys.modules[__name__].__file__)), 'data') + os.mkdir(self.output_dir) + t_file_names = ['simple', 'simple2'] + self._remove_cached_templates(basedir, t_file_names) + t_files = [os.path.join(basedir, t_file) + '.tmpl' for t_file in t_file_names] + my_planet.generate_all_files(t_files, "Planet Name", + 'http://example.com/', 'http://example.com/feed/', 'Mary', 'mary@example.com') + for file_name in t_file_names: + name = os.path.join(self.output_dir, file_name) + content = file(name).read() + self.assertEqual(content, 'Mary\n') + + def _remove_cached_templates(self, basedir, template_files): + """ + Remove the .tmplc files and force them to be rebuilt. + + This is required mainly so that the tests don't fail in mysterious ways in + directories that have been moved, eg 'branches/my-branch' to + 'branches/mysterious-branch' -- the .tmplc files seem to remember their full + path + """ + for file in template_files: + path = os.path.join(basedir, file + '.tmplc') + try: + os.remove(path) + except OSError, e: + # we don't care about the file not being there, we care about + # everything else + if e.errno != errno.ENOENT: + raise + + def setUp(self): + super(MainTest, self).setUp() + self.output_dir = 'output' + + def tearDown(self): + super(MainTest, self).tearDown() + shutil.rmtree(self.output_dir, ignore_errors = True) + shutil.rmtree('cache', ignore_errors = True) + +if __name__ == '__main__': + unittest.main() diff --git a/DJAGEN/branches/oguz/djagen/gezegen/planet/tests/test_sanitize.py b/DJAGEN/branches/oguz/djagen/gezegen/planet/tests/test_sanitize.py new file mode 100755 index 0000000..f0f1d42 --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/gezegen/planet/tests/test_sanitize.py @@ -0,0 +1,125 @@ +# adapted from http://www.iamcal.com/publish/articles/php/processing_html_part_2/ +# and from http://feedparser.org/tests/wellformed/sanitize/ +# by Aaron Swartz, 2006, public domain + +import unittest, new +from planet import sanitize + +class SanitizeTest(unittest.TestCase): pass + +# each call to HTML adds a test case to SanitizeTest +testcases = 0 +def HTML(a, b): + global testcases + testcases += 1 + func = lambda self: self.assertEqual(sanitize.HTML(a), b) + method = new.instancemethod(func, None, SanitizeTest) + setattr(SanitizeTest, "test_%d" % testcases, method) + +## basics +HTML("","") +HTML("hello","hello") + +## balancing tags +HTML("hello","hello") +HTML("hello","hello") +HTML("hello","hello") +HTML("hello","hello") +HTML("hello","hello") +HTML("","") + +## trailing slashes +HTML('','') +HTML('','') +HTML('','') + +## balancing angle brakets +HTML('','b>') +HTML('','>') +HTML('foofoo','b>foo') +HTML('>') +HTML('b><','b>') +HTML('>','>') + +## attributes +HTML('','') +HTML('','') +HTML('','') + +## dangerous tags (a small sample) +sHTML = lambda x: HTML(x, 'safe description') +sHTML('safe description') +sHTML('safe description') +sHTML('safe description') +sHTML('safe description') +sHTML('safe description') +sHTML('safe') +sHTML('safe description') +sHTML('safe description') +sHTML('safe description') +sHTML('safe description') + +for x in ['onabort', 'onblur', 'onchange', 'onclick', 'ondblclick', 'onerror', 'onfocus', 'onkeydown', 'onkeypress', 'onkeyup', 'onload', 'onmousedown', 'onmouseout', 'onmouseover', 'onmouseup', 'onreset', 'resize', 'onsubmit', 'onunload']: + HTML('' % x, + '') + +HTML('never trust your upstream platypus', 'never trust your upstream platypus') + +## ignorables +HTML('foo', 'foo') + +## non-allowed tags 
+HTML('','') +HTML('\r\n\r\n\r\n\r\n\r\nfunction executeMe()\r\n{\r\n\r\n\r\n\r\n\r\n/* + + + + + + {% endblock %} + +
+
+ RSS + Atom +
+
+ +
+ +
+ {% block menu %} + + + {% endblock %} +
+ +

Gezegen her 10 dakikada bir yenilenir. Son güncelleme: {{ run_time.get_run_time }}

+ +
+ + {% block body %} + {% endblock %} + + +
+ + {% block footer%} + + {% endblock %} + + + + + +
+ + diff --git a/DJAGEN/branches/oguz/djagen/templates/main/feeds.html b/DJAGEN/branches/oguz/djagen/templates/main/feeds.html new file mode 100755 index 0000000..f2bd421 --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/templates/main/feeds.html @@ -0,0 +1,26 @@ +
+ +
diff --git a/DJAGEN/branches/oguz/djagen/templates/main/index.html b/DJAGEN/branches/oguz/djagen/templates/main/index.html new file mode 100755 index 0000000..35a41a3 --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/templates/main/index.html @@ -0,0 +1,915 @@ + + + + Linux Gezegeni + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ + +
+

16 Mart 2010

+ +
+ + +
+
+
+

+ +Yakın Doğu’da Seminer Rüzgarları +

+
+
+
+

Geçen haftadan beri Yakın Doğu Üniversitesi’nde Linux ve Özgür Yazılım seminerleri düzenliyoruz. İlk seminerimiz Linux Nedir? idi. Uzun zamandan beri dinlediğin en eğlenceli Linux Nedir’lerden birisi idi. 3.30 saat sürmesine rağmen konuşmacı Ali Erdinç Köroğlu’nun eğlenceli anlatımı ile zaman su gibi aktı denebilir.

+

Yakın Doğu’ya ilk geldiğim zamanlarda ben de bir Linux Nedir semineri vermiştim. O zamanki katılımcı durumu işte aşağıdaki gibi idi.

+

+

Aslında çoğunluğunu öğretmenlerin oluşturması gereken katılımcı kitlesini, öğrenciler oluşturuyor idi. 1.30 saatlik bir anlatım ile katılımcılara Linux Nedir anlatmıştım. Seminer süresince hiç soru gelmemişti. Seminerden sonra yanıma gelip soru soranlar da olunca, epey mutlu olmuştum.

+

+

Şimdiki durumda katılımcı sayısı azımsanmayacak kadar olması yanında, daha ilgili bir kalabalığın katıldığını düşünüyorum.

+

+

Ali Erdinc’in de epey bir eğlenceli anlatımı olduğunu, dinleyicinin dikkatini çekmek için onları arada dürttüğünü de belirtmek lazım.

+

+

Bu seminerler dizisi Mayıs ayına kadar devam edecek. Meraklısı için üniversite duyuru sayfası, Facebook ve Twitter‘dan takip edebileceklerini söyleyelim. Hatta Kıbrıs’ta olanlar için üniversitede her Cuma akşamları ve Cumartesi günleri sinema gösterimleri yaptığımızı da belirtelim. Biz izlerken eğleniyoruz. Bekleriz.

+

Lefkoşa’ya bahar geldi denebilir. Oğuz Yarımtepe Kıbrıs’tan bildirdi.

+

+
+
+
+ + + + + + + + + +
+
+ +
+
+
+

05 Şubat 2010

+ +
+ + +
+
+
+

+ +100 ml +

+
+
+
+

1 Ocak 2010 tarihinden itibaren uçuşlarda sıvı kısıtlaması uygulanıyor. El bagajlarında 100 mlyi geçen sıvılara, jel, krem vb. el konuyor. Yılbaşında Kıbrıs’a dönerken başıma bir olay gelince ne menem bir şeydir diye araştırdığım bir konu oldu. Sırt çantamda 150 ml Dove krem var idi. Kremi epeydir kullanıyordum. Saat 5.30 gibi uçağa binmek için son kontrol noktasında bekliyorduk. Kontrol için güvenlik gelebilirsiniz dedikten sonra ben de  çantayı cihaza bırakıp geçtim. Cihazın başındaki görevli bu çantada sıvı bir şey var diye seslendi bana. Ön gözde dedi. Evet krem var dedim. Açtı. Baktı 150 ml bir krem. 100 ml üzerini alamıyoruz dedi. Ben de uyku sersemi, o kullanılmış bir kutu, içindeki belki 100 mlnin altındadır dedim. Görevli gülümsedi. O da görevini yapıyordu. Ayakta duran bayan sert bir şekilde kutuyu aldı, baktı, bizim için bunu hacmi önemli dedi. Açıkcası tartışmanın bir anlamı yoktu. Onlar da bir kuralı uyguluyorlardı. Elimle söylendiği gibi para verip aldığım kremi çöpe attım.

+

Şimdi olayın benim açımdan garip noktalarına gelelim

+

* Güvenlik görevlisi kutunun kaç ml olduğuna üzerine bakarak karar verdi. Yani bir dahaki sefere sırf sistemi denemek için, aynı kremden alıp, üzerindeki 150 yi 100 yaparsam geçer muhtemelen.

+

* Görevli içine açıp bakmadı bile. Bana sordu ne var diye. Yani içine şu epeydir ortalarda olmayan domuz gribi koysam dahi bilgisi olamazdı.

+

* Elimle çöpe attım, o çok koydu.

+

Ben de bunun üzerine Ulaştırma Bakanlığı’na bir eposta attım. Epostam Sivil Havacılık Dairesine yönlendirilmiş ve bir yanıt geldi. Kısaca, bu 100 ml uygulaması 10 Ağustos 2006′da İngiltere’de ortaya çıkan terörist plan sonrasında sivil havacılık gündemine girmiş. 6 Kasım 2006′da çıkarılan 1546/2006 tüzüğü ile tüm AB üyesi ülkelerde ve İsviçre, İzlanda ve Norveç’te, ABD ve Kanada’da uygulanmaya başlamış. Türkiye de ECAC üyesi bir devlet olduğundan tavsiye kararına uyarak bu uygulamayı diğer devletlerdeki gibi aynı kurallar dahilinde uygulamaya başlamış. Neden 100 ml peki? Birleşmiş Milletler bünyesindeki Patlayıcılar Çalışma Grubunda yapılan araştırma, testler ve risk değerlendirmesi neticesinde sıvıların 100 ml lik kaplarda 1 litreklik poşette taşınması halinde (1 lt’lik poşete yaklaşık 6 adet kap sığmaktaymış) uçuşun emniyetini tehlikeye düşürmeyeceği sonucuna varılmış. Bilim adamları araştırmış bulmuş, bir şey diyemeyecem bu konuda. Peki bizde 100 ml olduğu nasıl anlaşılıyor? Baya, ya size soruyorlar ya da üzerindeki yazıları okuyorlar. Yani ben teroristlik yapmak istesem, alırım 200 ml sıvı patlayıcı, koyarım Dove krem kutusuna, yazıları bir güzel 100 diye düzenlerim, sorarlarsa da 100 der geçerim. Peki biz neden illa 100 diye uyguluyoruz? E çünkü diğer ülkelerde de öyle de ondan. Epostadaki şu satırlara bakalım:

+

“Ülkemiz yukarıda adı geçen uluslarası kuruluşların aldığı kararları  ve berlilediği standartları uygulamakla yükümlüdür.”

+

Bu konudaki uygulama diğer ülkelerde hangi standartlarda uygulanıyor bilmiyorum. Belki de sadece Kıbrıs uçuşlarında bir acayiplik vardır. Standart denilen kavram sadece 100 sayısına bağlı bir şeydir diye de anlaşılıyor olabilir.

+

Siz siz olun, uçağa binerken el bagajınızda üzerinde 100 ml üzeri bir şey yazan herhangi bir kap bulundurmayın. İçi boş dolu farketmez.

+
+
+
+ + + + + + + + + +
+
+ +
+
+
+

29 Ocak 2010

+ +
+ + +
+
+
+

+ +Artık Sun yok! +

+
+
+
+

iPsunoraclead haberleri arasında kaybolup gidiyor ama Oracle uzun süren Sun’ı satın alma işlemini bitirdi. Artık www.sun.com adresine girdiğinizde sizi doğrudan Oracle sitesine yönlendiriyor.

+

Beni en çok ilgilendiren konular ise Sun’ın özgür yazılım projelerine devam edilip edilmeyeceği konularında ise şimdilik olumlu haberler geliyor. Bütün bu projeler içerisinde devam edilmeyeceği açıklanan tek proje şimdilik Kenai.

+

Umarım hepimiz için mutlu son olur…

+

Ek: Kültür Mantarı‘nın yönlendirmesi ile James Gosling’in bu konu ile ilgili blogunu gördüm ve ordaki görselin de burada saklanmasının iyi olacağını düşünüp buraya kopyaladım…

+

sunrip


+
+
+ + + + + + +
+
+ +
+
+
+

24 Aralık 2009

+ +
+ + +
+
+
+

+ +EMO 13. Ulusal Kongresi +

+
+
+
+

EMO’nun 23-26 Aralıkta ODTÜ de gerçekleşecek olan 13. Ulusal Kongresi kapsamında 25 Aralık Cuma günü 9:30-11:15 arasında Özgür Yazılım başlılklı özel oturumda “Özgür Yazılımların Uygulama Geliştirme Modeline Etkisi; Tekir’den Öğrendiklerimiz” ve 11.30-12.30 arasında da “Özgür Yazılımın Ekonomik ve Sosyal Yönleri” sunumlarını yapıyorum.

+

Genel olarak yüklü bir programı olan bu etkinlikte çeşitli LKD seminerleri de olacak. Buyrunuz geliniz!


+
+
+ + + + + + +
+
+ +
+
+
+

24 Eylül 2009

+ +
+ + +
+
+
+

+ +Intel, Atom, Moblin +

+
+
+
+

Intel Atom işlemcileri ile hayatın her yerinde yer alamak istiyor. x86 tabanlı Atom işlemciler programcılara normal bilgisayarlar için yazılmış uygulamalarını çok fazla değişikliğe uğratmaya gerek kalmadan mobil cihazlarda çalıştırabilmesine olanak sağlıyor. Bu da Intel’e önemli avantajlar sağlıyor. Bu avantajını daha da arttırmak için cihazar üzerinde performansı arttıracak işletim sistemi için de kolları sıvayıp Moblin’i geliştirmeye başlamışlardı. Dün bu konular üzerine Intel’den üç önemli açıklama oldu…

+

Atom işlemcili cihazlarda uygulama performansını arttırmak için yeni bir geliştirici programı başlattılar. Atom Developer Program‘ı teşvik etmek içinde bir yarışma başlattılar. Bence bir göz atmakta fayda var… ( Ben kayıt olacağım :) )

+

İkinci ve üçüncü açıklamalar ise bir arada geldi, Moblin’in yeni sürümü 2.1 yayınlandı ve Atom işlemcili bir akıllı telefon üzerinde sunuldu. Intel bir çırpıda bir dolu firmaya rakip oldu :) Geçenlerde de yazmıştım,  önümüzdeki yıl içerisinde mobil dünyada bir dolu ilginç gelişmeler bekliyorum. Umarım bu rekabetten özgür yazılım ve biz kullanıcılar kazançlı çıkarız…


+
+
+ + + + + + +
+
+ +
+
+
+

25 Ağustos 2009

+ +
+ + +
+
+
+

+ +Teknik Destek Kopya Kağıtı +

+
+
+
+

xkcd’de geçen gün yayınlanan bu teknik destek kopya kağıdını pek beğendim ve Türkçe bir sürümünü yapayım dedim.

+

teknikdestek
+İsteyenler için ODF hali de burada


+
+
+ + + + + + +
+
+ +
+
+
+

18 Ağustos 2009

+ +
+ + +
+
+
+

+ +Korsan Değil “Fikir Hırsızı” +

+
+
+
+

Kültür ve Turizm Bakanlığı, Fikir ve Sanat Eserleri Kanunu’nda değişiklik yapılarak İnternet üzerinden müzik, film, kitap ve benzeri şeyleri indirenlerinde ceza almasını sağlamak için çalışma başlatmış. Bu denetimi yapabilmek için de internet servis sağlayıcıları ile birlikte çalışacaklarmış.

+

Her köşe başında kurulan tezgahlarda kitap, cd, dvd satan arkadaşlar hiç bir sorun ile karşılaşmadan bu işi yaparken, bunun için bildiğim kadarıyla yasal düzenlemeler zaten var, onlarla mücadele etmek yerine, internetten akan tarfiği denetleyecekler. Bu denetim sırasında da müzik mi yoksa sevilinizden gelen e-postanızı mı indirdiğiniz fark etmeyecek, dinleyecekler. Ayrıca indirdiğiniz müziğin yasal mı yoksa yasa dışımı olduğunu bir çırpıda anlayacaklar. Bu arada, haberden eğer yanlış okumadıysam,  yapılan operasyonda makinenizde çıkan parça sayısı kadar da görevlilere prim verilecek :) Yani büyük birader izlemeye ve istediğinde yasaklamaya olanak sağlayacak yeni yollar arıyor…

+

Bütün bunlardan fikir haklarına saygı duymadığım anlaşılmasın tam tersine korsana, fikir hırsızlığına kesinlikle karşıyım. Fakat bütün bunların bahane edilerek kişisel iletişimin ihlal edilmesine daha çok karşıyım.

+

Son olarak bir haber daha verelim Pirate Bay’in 23 GB’lik arşivi de paylaşıma açılmış. Bu arşiv içerisinde yasal olmayan şeyler olabilir ama yasal olarak paylaşılan da bir çok eser var. Sizler yasal olanlarını indirin :) Korsan değil özgür yazılım kullanın!


+
+
+ + + + + + +
+
+ +
+
+
+

07 Temmuz 2009

+ +
+ + +
+
+
+

+ +Mobil Cihazlar Dünyasında Neler Oluyor? +

+
+
+
+

moblinBir süredir mobil cihazlar dünyası hareketlenmiş durumda. Apple iPhone ile birlikte mobil telefon dünyasında ciddi bir hareketlenme olmuştu. Palm, Nokia, Sony-Ericson, BlackBerry gibi sektörün önde gelen firmalarına Apple ciddi bir rakip olarak ortaya çıkmış ardından da Google Android ile bir platform olarak henüz yeterince destekleyen donanım olmasa bile vaadettikleri ile dikkatleri üzerine çekmişti. Android, WebOS ve iPhone OS‘a  karşı Symbian‘ı savunmaya devam eden Nokia, elinde olmayan hisselerini de alarak,  bir vakıf kurup Symbiyan’ı açık kaynak kodlu olarak  bu vakfa devretmişti.

+

Tam da bu esnada Intel Atom işlemcisi ile düşük kaynak kullanan PC’lerin geliştirilmesine olanak sağladı ve NetBook’lar geçtiğimiz yıl içinde popüler cihazlar arasına girdiler.

+

Bu yıl ise Intel, Mobile Internet Device ( MID - Mobil Internet Aracı ) üzerine ciddi yatırımlar yapmaya başladı. Hatta bu cihazların cazibesini arttırmak için özel bir linux dağıtımına bile başladı : Moblin.

+

Moblin’e destek konusunda Intel önce Canonical ile anlaşmıştı. Daha sonra Canonical NetBook dağıtımı olarak Nokia’nın kendi tabletlerinde kullanmak amacıyla ürettiği Maemo‘yu desteklemeye karar verdiğini açıkladı. Intel’de Moblin’i Linux Vakfı’na devrettiğini ve destek konusunda da Novell’le anlaştığını ilan etti. İki hafta önce detayları belirtilmeyen bir Nokia - Intel anlaşması ilan edildi. Genel olarak yorumlanan ise  Nokia’nın daha becerikli telefonlar üretmek için Intel teknolojilerini kullanacağı bu arada da Moblin ile Maemo arasında bir seçim yapıp güçlerini birleştirecekleri yönündeydi. Bugün Nokia, Android temelli telefonlar üretmeyeceğini ve GTK+ temelli olan Maemo’yu Qt’ye taşıyacağını ilan etti.

+

İşte benim sorularımın temelini de burası oluşturuyor. Qt temelli bir Maemo’yu Canonical desteklemeye devam edecek mi? Nokia Intel işlemcili MID’ler üretip bunlarda Mameo’mu koşturacak yoksa Intel işlemcili telefonlar üretip Symbian’ı rakipleri kadar becerikli yepyeni bir hale mi dönüştürecek? Intel MID’ler konusunda neler planlıyor? Bu planları içerisinde Moblin’i desteklemeye devam etmek var mı yoksa Nokia ile birlikte Maemo’ya yatırım mı yapacaklar? NetBook’larda da kullanılmaya başlayan Android de bu üretilecek donanımlar için bir alternatif olacaklar mı?

+

Hepsinden önemlisi bütün bunların sonucunda ortaya çıkacak olan, biz tüketiciler için ucuz ve becerikli donanımlar mı yoksa; bir biri ile uyumsuz bir dolu daha oyuncak mı?


+
+
+ + + + + + +
+
+ +
+
+
+

17 Haziran 2009

+ +
+ + +
+
+
+

+ +LKD Genel Kurulu için Ankara’ya +

+
+
+
+

Bu hafta sonu gerçekleştirilecek LKD Genel Kurulu için Ankara’ya gidiyoruz. Aşağıdaki yazı bu yolculuk için organizasyon yapmaya çalışan  Volkan’dan…

+

***

+

Ankara yerine Bağdata gitsem bu kadar koştururdum herhalde,

+

TCDD : en teknolojik YHT çalıştıran, 5 saaat 28 dk Ankaraya ulaştıran koskoca
+kurum.
+Evet bu kurum malesef bilet satmak istemiyor.

+

1- web sitesi windows ve Internet explorer bağımlısı. Öncelikle böyle bir
+sisteme sahip olmanız gerekli. (MAC ve Linux kullanıcıları tren yolcuları
+portföyünde yer almıyor. Onlar uçak veya otobüs severler.!)

+

2- web sitesindeki bilet satış uygulamasında banka sıra makinelerinin bir
+türevi sadece. Sıradan boş koltuk veriyor. Pulman vagonlarında ilk 6 koltuk
+karşılıklı bakar durumda, son 3 koltukda geriye yatamaz durumda. Bilin
+bakalım verdiği ilk koltuklar nereleri ? Evet bildiniz bunlar. Farklı bir
+koltuk veya vagon seçemiyorsunuz. Seçilebilecek şeyler bayan yanı ve
+internet. Onlarında ne kadar gerçek seçimlere izin verildiği şüpheli.
+(İnternet olsun dedim, sonuç yok dedi.)

+

3- PTT şubeleri tren bilet satacak acenteler olarak duyuruluyor. Gidiyorsunuz,
+veee… Evet, biz satıyoruz, ama siteye girebilirsek diyorlar. Ne oldu, tabii
+ki giremediler. 10dk sıra beklediniğiniz için teşekkür ederiz.

+

4- Acente, komisyon karşılığı TCDD bileti satanlar. Gidiyoruz birine, bize
+bilet lazım satabiliyor musunuz? diye soruyorum. Tabii buyrun diyorlar. Gidiş
+dönüş 1 tam 1 öğrenci istiyorum. Satıcı önce
+- G/D kesmiyorum diyor buradan.!
+- Nasıl yani?
+- Fark yok zaten,ayrı ayrı keseyim. Fiyatı farklı mı ki?
+Başka bir arkadaşı düzeltiyor, aynı express olursa kesebilirsin.
+- Elbette G/D niye alayım indirim var diyorum.
+Neyse girip deniyor, gelen koltuk numaralarını soruyorum.
+- 4,5 diyor. (İlk altı koltuk içinden boş olanlar)
+- Değiştiremiyor musunuz?
+- Malesef.
+- Internet sürümüne mi giriyorsunuz diyorum ister istemez.
+- Hayır biz acente olarak giriyoruz ama fark yok. cevabı geliyor. (Tahmininen
+üzerine ek komisyon ekleniyor sadece.)
+- Kim koltuk seçtiriyor bana ?
+- Gardan alabilirsiniz, Haydarpaşa veya Sirkeci.

+

5- Rotamız Sirkeci garı. Bir otobüs ve tramvay ile ulaşıyorum.
+Bende dil yandı ya, ilk soru Fatih expresine bilet istiyorum, ama koltuk
+seçebiliyor musunuz?
+- Bakalım yer boş olursa seçebiliriz diyor satıcı bu kez.
+- Ohh nihayet.
+- 1 tam 1 öğrenci G/D istiyorum, artı 1 öğrenci sadece gidiş.
+- Öğrencide G/D farkı yok cevabı geliyor.
+- Biliyorum, tam da var onun için söylüyorum.(Bilgi: Tam bileti G/D alırsanız
+öğrenci bileti ile aynı fiyat, garip.G/D alacaksanız öğrenciliğiniz işe
+yaramıyor. Yani pasoya gerek yok. Tespit: Öğrenciler hep tek yö seyahat
+eder.)
+- Kredi kartımı, peşin mi?
+- DIINN ! kredi kartı.. var dimi?
+- Evet, 112 TL
+- Buyrun, zııttt pıırtt iki tak tak bi laklak biletler ve pos slipi elimde.

+

Gişenin önünden ayrılmadan biletleri tren, tarih, yer vs. doğru mu diye
+kontrol ediyorum. Elimde biletler teşekkür edip ayrılırken, 1,5 saatte ancak
+bir alış veriş yapmış oluyorum.  Daha bir de geri dönüş yolu var.

+

Velhasıl,
+Gidiş : 18/06/2009 Perşembe 23:30 Haydarpaşa Vagon:X Koltuk: XX-XX-XX
+Gidiş : 20/06/2009 Cumartesi 23:30 Ankara Vagon:X Koltuk: XX-XX

+

Hayırlı yolculuklar.

+

=====================
+Dipnot-1: Bu yerleri aldığım 1. vagon haricinde 2 vagon tamamen boş görünüyor
+daha. 2-3 nolarda satılan yerler var.

+

Dipnot-2: Ben telefonla iş yapmaya alışamamış biri olarak, rezervasyon veya
+satış işlemi var mı diye hiç peşine düşmedim. Orada da farklı bir macera sizi
+bekliyor olabilir, kimbilir?

+

Dipnot-3: Yataklı vagonlarda alt-üst yatak seçme şansınız olabilir mi sizce?


+
+
+ + + + + + +
+
+ +
+
+
+

16 Haziran 2009

+ +
+ + +
+
+
+

+ +IE, WTW ve Gıda Yardımı +

+
+
+
+

wfp-wtwBugünlerde dünya üzerindeki açlık, gıda yardımları ve bunlara ait haberler her zamankinden daha fazla ilgimi çekiyor. Dolayısıyla Microsoft’un yeni kampanyası ilgimi çekti. Microsoft İnternet Tarayıcısının yeni sürümünü daha iyi duyurabilmek için gıda yardımı üzerine kurulu bir kampanya başlatmış. IE8′in her tam indirilmesine karşılık 8 öğün yemek bağışında bulunacakmış. Detaylara buradan ulaşabilirsiniz…

+

Bu konu ile ilgili de bir dolu tartışma gündeme geldi tabii ki, örneğin TechCrunch‘da kampanyaya dair bir dolu yazı ve tartışma var. Ben kendi adıma Linux üzerinde zaten çalışmayan bu tarayıcıyı indirip biraz ağ zamanı harcayıp bağışta bulunsam mı, zaten IE kullananların hatalı çalışan eski sürümler yerine CSS ve JS ile ilgili bir dolu hatanın düzeltildiği bu yeni sürüme geçmelerini teşvik etsem mi, yoksa hiç sesimi çıkarmasam mı bilemedim. Ardından da bu haberi bahane edip daha fazlası ile yazayım dedim.

+

İster IE8 indirin isterseniz aşağıdaki organizasyonların sitelerini ziyaret edip dünya üzerindeki açlık ve fakirlikle mücadeleye katkıda bulunmak için yapabileceklerinizi öğrenin… Bunların içerisinde özellikle Birleşmiş Milletler Dünya Gıda Programı’nın Walk The Web kampanyasına bir göz atmanızı öneririm…

+ +

Son olarak da bugünlerde herkese önerdiğim gibi Yuva ( Home ) belgeselini izlemenizi şiddetle tavsiye ederim.


+
+
+ + + + + + +
+
+ +
+
+
+

28 Mayıs 2009

+ +
+ + +
+
+
+

+ +TBD Bilişim Kongresi’nde Özgür Yazılım Paneli +

+
+
+
+

TBD’nin bu yıl 3.sünü düzenlediği İstanbul Bilişim Kongresi‘nde Pazar günü saat 14:00′de Özgür Yazılım Paneli olacaktır. Panel’de özgür yazılım ve iş modelleri üzerinde durulacaktır. İlgilenenlere duyurulur…

+

Yer: Marmara Üniversitesi Nişantaşı Kampüsü
+Erdal İnönü Bilim ve Kültür Merkezi
+Tarih: 31 Mayıs Pazar, 14:00 - 15:20
+Oturum başkanı: Görkem Çetin
+Konuşmacılar: Enver Altın, Hakan Uygun, Cahit Cengizhan


+
+
+ + + + + + +
+
+ +
+
+
+

13 Nisan 2009

+ +
+ + +
+
+
+

+ +Sıralama Algoritmaları +

+
+
+
+

Sıralama algoritmaları, programcılığa girişi oluşturan en temel şeylerdendir. Özellikle aynı problemin çözümü için farklı yöntemlerin nasıl farklı sonuçlar verdiğini görmek için şahane örneklerdir. Daha da iyisi bu farklı algoritmaların görsel olarak karşılaştırmasıdır. İşte tam da bu işi başarıyla yapan bu siteye bakmanızı şiddetle tavsiye ederim. Sadece farklı algoritmaları görsel karşılaştırmasını değil, her algoritmanın farklı veri kümelerinde davranış biçimini ve detaylı karşılaştırmalarını da bulabilirsiniz…


+
+
+ + + + + + +
+
+ +
+
+
+
+ + + + + + + + + + + + + + diff --git a/DJAGEN/branches/oguz/djagen/templates/main/main.html b/DJAGEN/branches/oguz/djagen/templates/main/main.html new file mode 100755 index 0000000..6c178fa --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/templates/main/main.html @@ -0,0 +1,36 @@ +{% extends "main/base.html" %} + + + {% block body %} + + {% for entry in entries_list|slice:items_per_page %} + {% autoescape off %} + + {% ifequal entry.entry_id.is_approved 1 %} + + {% ifchanged entry.date.day entry.date.month entry.date.year %}
{% endifchanged %} + + {% ifchanged %}

{{ entry.date|date:"d F Y" }}

{% endifchanged %} + +
+ + +

{{ entry.title }}

+

+ Yazar: {{ entry.entry_id.author_name }} + Tarih: {{ entry.date|date:"d F Y H:i" }} +

+
+ {{ entry.content_html|truncatewords_html:truncate_words }} +
+ {% endifequal %} + {% endautoescape %} + +
+ + + {% endfor %} + + + {% endblock %} + diff --git a/DJAGEN/branches/oguz/djagen/templates/main/members.html b/DJAGEN/branches/oguz/djagen/templates/main/members.html new file mode 100755 index 0000000..93eb28a --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/templates/main/members.html @@ -0,0 +1,16 @@ +{% extends "main/base.html" %} + + {% block body %} + + + + + {% endblock %} diff --git a/DJAGEN/branches/oguz/djagen/templates/main/subscribe.html b/DJAGEN/branches/oguz/djagen/templates/main/subscribe.html new file mode 100755 index 0000000..539ca58 --- /dev/null +++ b/DJAGEN/branches/oguz/djagen/templates/main/subscribe.html @@ -0,0 +1,42 @@ +{% extends "main/base.html" %} + + {% block body %} +

+ Linux Gezegeni Gezegen Ekibi tarafından yönetilmektedir, Gezegen hakkındaki sorularınızı ve Gezegen'e iniş başvurularınızı e-posta ile iletebilirsiniz. +

+ +
+ +

+ Gezegene iniş başvurularınızda Gezegen Kuralları'na uyan RSS/Atom beslemenizi ve gezegen içerisinde kullanmak istediğiniz (en fazla 80x80 çözünürlüklü) fotoğrafınızı (bir başka deyişle hackergotchi); varsa jabber adresini aşağıdaki formu kullanarak göndermenizi rica ediyoruz. +

+ +
+ + {% ifnotequal submit 'done' %} + +

Üye Başvuru Formu

+
+ {% for field in form %} +
+ {% if field.errors %} + {{ field.errors }} + {% endif %} + {{ field.label_tag }} + {% if field.help_text %} + {{ field.help_text }} + {% endif %} + {{ field }} +
+ {% endfor %} +
+ +
+ {% else %} +

+ Kaydınız alındı. +

+    {% endifnotequal %}
+
+    {% endblock %}
+
diff --git a/DJAGEN/branches/oguz/djagen/urls.py b/DJAGEN/branches/oguz/djagen/urls.py
new file mode 100755
index 0000000..063a5a6
--- /dev/null
+++ b/DJAGEN/branches/oguz/djagen/urls.py
@@ -0,0 +1,23 @@
+from django.conf.urls.defaults import *
+
+# Uncomment the next two lines to enable the admin:
+from django.contrib import admin
+admin.autodiscover()
+
+urlpatterns = patterns('',
+    # Example:
+    # (r'^djagen/', include('djagen.foo.urls')),
+
+    # Uncomment the admin/doc line below and add 'django.contrib.admindocs'
+    # to INSTALLED_APPS to enable admin documentation:
+    # (r'^admin/doc/', include('django.contrib.admindocs.urls')),
+
+    # Uncomment the next line to enable the admin:
+    (r'^admin/(.*)', admin.site.root),
+    (r'^main/', 'djagen.collector.views.main'),
+    (r'^subscribe/', 'djagen.collector.views.member_subscribe'),
+    (r'^members/', 'djagen.collector.views.list_members'),
+)
+urlpatterns += patterns('',
+    url(r'^captcha/', include('captcha.urls')),
+)
diff --git a/DJAGEN/tags/djagen_old/djagen/__init__.py b/DJAGEN/tags/djagen_old/djagen/__init__.py
new file mode 100755
index 0000000..e69de29
diff --git a/DJAGEN/tags/djagen_old/djagen/collector/__init__.py b/DJAGEN/tags/djagen_old/djagen/collector/__init__.py
new file mode 100755
index 0000000..e69de29
diff --git a/DJAGEN/tags/djagen_old/djagen/collector/admin.py b/DJAGEN/tags/djagen_old/djagen/collector/admin.py
new file mode 100755
index 0000000..f6c9e20
--- /dev/null
+++ b/DJAGEN/tags/djagen_old/djagen/collector/admin.py
@@ -0,0 +1,74 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+from django.contrib import admin
+from djagen.collector.models import *
+
+from django.conf import settings
+
+import os
+import datetime
+import shutil
+
+from djagen.collector.configini import *
+
+class AuthorsAdmin (admin.ModelAdmin):
+
+    list_display = ('author_id', 'author_name', 'author_email', 'author_face', 'current_status', 'is_approved', 'label_personal', 'label_lkd', 'label_community', 'label_eng')
+    list_select_related = True
+
+    search_fields = ['author_name', 'author_surname', 'author_email']
+
+    def save_model(self, request, obj, form, change):
+
+        # get the values for saving
+        author_name = obj.author_name
+        author_surname = obj.author_surname
+        author_face = obj.author_face
+        channel_url = obj.channel_url
+
+        current_status = obj.current_status
+        is_approved = obj.is_approved
+
+        # creating the history
+        now = datetime.datetime.now()
+        action_type = current_status
+
+        author_id = obj.author_id
+        if author_id:
+            # then this is an update
+            author = Authors.objects.get(author_id = author_id)
+            pre_status = author.is_approved
+            current_status = obj.is_approved
+            obj.save()
+        else:
+            obj.save()
+            author = Authors.objects.get(author_name=author_name, author_surname=author_surname, channel_url=channel_url)
+            pre_status = None
+            current_status = author.is_approved
+
+        author.history_set.create(action_type=action_type, action_date=now, action_owner=request.user.username)
+
+        # create tmp_config.ini here
+        handler = Handler(author.author_id)
+        handler.create_tmp_entries()
+
+        if pre_status != current_status:
+            a_face = author.author_face
+
+            images_path = os.path.join(settings.MAIN_PATH, 'www', 'images')
+            heads_path = os.path.join(images_path, 'heads')
+            face_path = os.path.join(heads_path, a_face)
+
+            tmp_image_path = os.path.join(settings.MAIN_PATH, 'temp_ini', a_face)
+
+            if os.path.exists(tmp_image_path):
+                shutil.move(tmp_image_path, face_path)
+
+class HistoryAdmin(admin.ModelAdmin):
+    list_display = ('action_type', 'action_date', 'action_author', 'action_owner')
+
+admin.site.register(History, HistoryAdmin)
+admin.site.register(Authors, AuthorsAdmin)
+
diff --git a/DJAGEN/tags/djagen_old/djagen/collector/configini.py b/DJAGEN/tags/djagen_old/djagen/collector/configini.py
new file mode 100755
index 0000000..af4f7ee
--- /dev/null
+++ b/DJAGEN/tags/djagen_old/djagen/collector/configini.py
@@ -0,0 +1,93 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+import os
+from django.conf import settings
+from djagen.collector.models import *
+import ConfigParser
+
+class Handler:
+
+    def __init__(self, id):
+
+        self.id = id
+
+        self.tmp_entries_ini = os.path.join(settings.MAIN_PATH, 'tmp_ini', 'tmp_entries.ini')
+
+        self.config_entries_ini = os.path.join(settings.MAIN_PATH, 'gezegen', 'config_entries.ini')
+
+    def __set_values(self):
+
+        author = Authors.objects.get(author_id = self.id)
+
+        if not author.is_approved:
+            return False
+
+        self.name = author.author_name + ' ' + author.author_surname
+        self.face = author.author_face
+        self.url = author.channel_url
+
+        # collect the names of the labels whose boolean flags are set
+        labels = [(author.label_personal, 'Personal'), (author.label_lkd, 'LKD'),
+                  (author.label_community, 'Community'), (author.label_eng, 'Eng')]
+        label_li = [name for flag, name in labels if flag]
+        self.author_labels = " ".join(label_li)
+
+        return True
+
+    def create_tmp_entries(self):
+
+        if not self.__set_values(): return
+
+        tmp_entries = open(self.tmp_entries_ini, 'w')
+
+        Config = ConfigParser.ConfigParser()
+        Config.read(self.config_entries_ini)
+        sections = Config.sections()
+
+        for section in sections:
+
+            config_name = Config.get(section, 'name')
+            config_label = Config.get(section, 'label')
+            config_id = Config.get(section, 'id')
+            config_url = section
+
+            try:
+                config_face = Config.get(section, 'face')
+            except:
+                config_face = None
+
+            if int(config_id) == int(self.id):
+                url = self.url
+                face = self.face
+                name = self.name
+                label = self.author_labels
+                id = self.id
+            else:
+                url = config_url
+                face = config_face
+                name = config_name
+                label = config_label
+                id = config_id
+
+            # section header in the bracketed planet config style
+            s = '[' + str(url) + ']' + '\n'
+            s += 'name = ' + name + '\n'
+            s += 'label = ' + label + '\n'
+            if face:
+                s += 'face = ' + face + '\n'
+            s += 'id = ' + str(id) + '\n' + '\n'
+
+            tmp_entries.write(s)
+
+        tmp_entries.close()
+
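The shape of one section that create_tmp_entries() above emits, and a check that ConfigParser can read it back (an editorial sketch, not part of the patch; the URL and values are hypothetical):

import ConfigParser
from StringIO import StringIO

sample = ("[http://example.com/blog/feed/]\n"
          "name = Example Author\n"
          "label = Personal\n"
          "face = example.png\n"
          "id = 42\n")

config = ConfigParser.ConfigParser()
config.readfp(StringIO(sample))
print config.get("http://example.com/blog/feed/", "name")   # Example Author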
diff --git a/DJAGEN/tags/djagen_old/djagen/collector/configxml.py b/DJAGEN/tags/djagen_old/djagen/collector/configxml.py
new file mode 100755
index 0000000..e952792
--- /dev/null
+++ b/DJAGEN/tags/djagen_old/djagen/collector/configxml.py
@@ -0,0 +1,121 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+import os
+from xml.dom import minidom
+
+class Handler:
+
+    def __init__(self):
+
+        self.main_url = "/home/oguz/django-projects/djagen/gezegen"
+        self.gezegen_url = os.path.join(self.main_url, "gezegen")
+        self.entries_xml = os.path.join(self.gezegen_url, "config_entries.xml")
+        self.header_xml = os.path.join(self.gezegen_url, 'config_header.xml')
+        self.tmp_ini_dir_path = os.path.join(self.main_url, "tmp_ini")
+
+    def get_doc(self, type="entries"):
+
+        if type == "entries":
+            self.doc = minidom.parse(self.entries_xml)
+        else:
+            self.doc = minidom.parse(self.header_xml)
+        return self.doc
+
+    def get_tag_entries(self, tag):
+
+        self.entries = self.doc.getElementsByTagName(tag)
+        return self.entries
+
+    def set_ini_variables(self, id, name, feed, nick, face, label):
+
+        self.tmp_ini = {'id': id, 'name': name, 'feed': feed, 'nick': nick, 'face': face, 'label': label}
+
+    def open_file(self):
+        path = os.path.join(self.tmp_ini_dir_path, 'tmp.ini')
+        self.f = open(path, "w")
+
+    def create_header(self):
+
+        for header in self.entries:
+
+            children = header.childNodes
+            for child in children:
+                if child.nodeType == child.TEXT_NODE: continue
+                else:
+                    node_name = child.nodeName
+                    f_child = child.firstChild
+                    node_value = f_child.nodeValue
+
+                    s = []
+                    if node_name != "header_name":
+                        s.append(node_name)
+                        s.append("=")
+                        s.append(node_value)
+                        s.append("\n")
+                        ss = " ".join(s)
+                        self.f.write(ss)
+
+    def traverse(self):
+
+        for entry in self.entries:
+
+            nodes = entry.childNodes
+
+            for node in nodes:
+
+                child = node.firstChild
+                self.face = None
+
+                if node.nodeType == node.TEXT_NODE: continue
+
+                if node.nodeName == "feed":
+                    self.feed = child.toxml()
+
+                if node.nodeName == "name":
+                    self.name = child.toxml()
+
+                if node.nodeName == "nick":
+                    self.nick = child.toxml()
+
+                if node.nodeName == "label":
+                    self.label = child.toxml()
+
+                if node.nodeName == "face":
+                    self.face = child.toxml()
+
+                if node.nodeName == "id":
+                    self.id = child.toxml()
+
+            if int(self.tmp_ini['id']) == int(self.id):
+                self.write_to_file(self.tmp_ini)
+            else:
+                config = {'id': self.id, 'name': self.name, 'feed': self.feed, 'nick': self.nick, 'label': self.label, 'face': self.face}
+                self.write_to_file(config)
+
+    def write_to_file(self, dic):
+
+        feed = "feed = " + dic['feed'] + "\n"
+        name = "name = " + dic['name'] + "\n"
+        nick = "nick = " + dic['nick'] + "\n"
+        label = "label = " + dic['label'] + "\n"
+        id = "id = " + dic['id'] + "\n"
+
+        self.f.write("\n")
+        self.f.write(feed)
+        self.f.write(name)
+        self.f.write(nick)
+        if dic['face']:
+            face = "face = " + dic['face'] + "\n"
+            self.f.write(face)
+        self.f.write(label)
+        self.f.write(id)
+
+    def close_file(self):
+        self.f.close()
+
diff --git a/DJAGEN/tags/djagen_old/djagen/collector/forms.py b/DJAGEN/tags/djagen_old/djagen/collector/forms.py
new file mode 100755
index 0000000..11a61d8
--- /dev/null
+++ b/DJAGEN/tags/djagen_old/djagen/collector/forms.py
@@ -0,0 +1,17 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+from django import forms
+from captcha.fields import CaptchaField
+
+class ContactForm(forms.Form):
+
+    name = forms.CharField(max_length=25, required=True, error_messages={'required': 'Lütfen adınızı giriniz'}, label='Adınız')
+    surname = forms.CharField(max_length=25, required=True, error_messages={'required': 'Lütfen soyadınızı giriniz'}, label='Soyadınız')
+    email = forms.EmailField(required=True, error_messages={'required': 'Size ulaşabileceğimiz eposta adresinizi giriniz'}, label='Eposta Adresiniz')
+    hackergotchi = forms.FileField(required=False, label='Hackergotchiniz', help_text='Gezegende görünmesini istediğiniz en fazla 80*80 piksellik fotoğrafınız')
+    feed = forms.URLField(required=True, label='Besleme adresiniz', help_text='Günlüğünüzün XML kaynağının adresi')
+    message = forms.CharField(required=False, label='İletişim Mesajınız', widget=forms.widgets.Textarea())
+    # field for captcha
+    captcha = CaptchaField(label="Captcha Alanı", help_text='Gördüğünüz karakterleri aynen yazınız', error_messages={'required': 'Hatalı yazdınız!'})
+
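Server-side flow for the form above (an editorial sketch, not part of the patch; it assumes a configured Django project and uses hypothetical values). The captcha field keeps a bare POST from validating:

from djagen.collector.forms import ContactForm

form = ContactForm({'name': u'Ali', 'surname': u'Veli',
                    'email': u'ali@example.com',
                    'feed': u'http://example.com/feed/'})
print form.is_valid()          # False: the required captcha field is missing
print form.errors['captcha']   # the Turkish 'required' message defined above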
diff --git a/DJAGEN/tags/djagen_old/djagen/collector/models.py b/DJAGEN/tags/djagen_old/djagen/collector/models.py
new file mode 100755
index 0000000..eee5269
--- /dev/null
+++ b/DJAGEN/tags/djagen_old/djagen/collector/models.py
@@ -0,0 +1,111 @@
+from django.db import models
+import datetime, unicodedata, random, time
+import re
+
+# Create your models here.
+ACTION_CHOICES = (
+    (1, u'Removed'),
+    (2, u'Approved'),
+    (3, u'Paused'),
+    (4, u'Readded'),
+    (5, u'Applied'),
+    (6, u'Edited')
+)
+
+class Authors (models.Model):
+    author_id = models.AutoField(primary_key=True, help_text="Author ID")
+    author_name = models.CharField(max_length=50, help_text="Author Name")
+    author_surname = models.CharField(max_length=50, help_text="Author Surname")
+    # we don't keep emails in the config.ini files; this field is entered on the admin page
+    author_email = models.EmailField(null=True, blank=True, help_text="Author Email Address")
+    # the png file name of the author
+    author_face = models.CharField(max_length=30, null=True, blank=True, help_text="Author Face Name")
+    channel_subtitle = models.TextField(null=True, blank=True, help_text="Channel Subtitle")
+    channel_title = models.TextField(null=True, blank=True, help_text="Channel Title")
+    # URL of the feed.
+    channel_url = models.URLField(help_text="Channel URL")
+    # Link to the original format feed
+    channel_link = models.URLField(null=True, blank=True, help_text="Channel Link")
+    channel_urlstatus = models.IntegerField(null=True, blank=True, help_text="Channel URL Status")
+
+    # use this field to check whether the author is shown on the planet or not, e.g. banned situations
+    current_status = models.SmallIntegerField(default=2, choices=ACTION_CHOICES, help_text="Current Status of the Author")
+    # whether the application to the planet is approved; only approved authors are shown on the planet
+    is_approved = models.BooleanField(default=1, help_text="Approve Status of the Author")
+
+    # planets that the channel belongs to
+    # in the config.ini the entries should be one of the below:
+    # label = Personal
+    # label = LKD
+    # label = Eng
+    # label = Community
+    label_personal = models.BooleanField(default=1, help_text="Channels on the Personal Blog Page")
+    label_lkd = models.BooleanField(default=0, help_text="Channels that belong to LKD Blogs")
+    label_community = models.BooleanField(default=0, help_text="Channels that belong to some community blogs")
+    label_eng = models.BooleanField(default=0, help_text="Channels that have English entries")
+    # on the main page, let's just show personal and lkd for now; for communities let's ask them for a special rss
+
+    def __unicode__(self):
+        return u'%s %s' % (self.author_name, self.author_surname)
+
+    class Meta:
+        # order according to the author_name, ascending
+        ordering = ['author_name']
+
+# keep the history of the actions that are performed on the member urls
+class History (models.Model):
+    action_type = models.SmallIntegerField(choices=ACTION_CHOICES)
+    action_date = models.DateTimeField()
+    action_explanation = models.TextField(help_text="Reason of Action", blank=True, null=True)
+    action_author = models.ForeignKey('Authors')
+    action_owner = models.CharField(max_length=20, help_text="The user who did the action")
+
+    def __unicode__(self):
+        return str(self.action_type)
+
+    class Meta:
+        # order descending, show the last actions at top
+        ordering = ['-action_date']
+
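ACTION_CHOICES stores small integers; Django's generated accessor maps them back to their labels. A sketch (not part of the patch; assumes a configured Django project):

from djagen.collector.models import History

h = History(action_type=5)
print h.get_action_type_display()   # -> Applied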
the entry") + entry_id = models.ForeignKey('Authors') + + def __unicode__(self): + + return self.title + + class Meta: + + ordering = ['-date'] + + + def sanitize(self, data): + p = re.compile(r'<[^<]*?/?>') + return p.sub('', data) + +class RunTime (models.Model): + run_time = models.DateTimeField(help_text="Run time of the planet script", auto_now=True) + + def __unicode__(self): + + return self.run_time + + class Meta: + + ordering = ['-run_time'] + + def get_run_time(self): + + dt = ".".join(map(lambda x: str(x), [self.run_time.day, self.run_time.month, self.run_time.year])) + hm = ":".join(map(lambda x: str(x), [self.run_time.hour, self.run_time.minute])) + + rslt = " ".join([dt, hm]) + return rslt + diff --git a/DJAGEN/tags/djagen_old/djagen/collector/views.py b/DJAGEN/tags/djagen_old/djagen/collector/views.py new file mode 100755 index 0000000..209b7b7 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/collector/views.py @@ -0,0 +1,160 @@ +# -*- coding: utf-8 -*- + +# View definitions are created here. +from django.shortcuts import render_to_response +from django.http import HttpResponse +from djagen.collector.models import * +from djagen.collector.forms import ContactForm +from djagen.collector.wrappers import render_response +from django.conf import settings +import magic +import os +import datetime, time +from django.core.paginator import Paginator, EmptyPage, InvalidPage + +import string + +def main(request): + selected_entries = Entries.objects.select_related() + entries_list1 = selected_entries.filter(entry_id__label_personal = 1) + entries_list2 = selected_entries.filter(entry_id__label_lkd = 1) + entries_list3 = selected_entries.filter(entry_id__label_community = 1) + entries_list = entries_list1 | entries_list2 | entries_list3 + + # This setting gets the content truncated which contains more than words. 
diff --git a/DJAGEN/tags/djagen_old/djagen/collector/views.py b/DJAGEN/tags/djagen_old/djagen/collector/views.py
new file mode 100755
index 0000000..209b7b7
--- /dev/null
+++ b/DJAGEN/tags/djagen_old/djagen/collector/views.py
@@ -0,0 +1,160 @@
+# -*- coding: utf-8 -*-
+
+# View definitions are created here.
+from django.shortcuts import render_to_response
+from django.http import HttpResponse
+from djagen.collector.models import *
+from djagen.collector.forms import ContactForm
+from djagen.collector.wrappers import render_response
+from django.conf import settings
+import magic
+import os
+import datetime, time
+from django.core.paginator import Paginator, EmptyPage, InvalidPage
+
+import string
+
+def main(request):
+    selected_entries = Entries.objects.select_related()
+    entries_list1 = selected_entries.filter(entry_id__label_personal = 1)
+    entries_list2 = selected_entries.filter(entry_id__label_lkd = 1)
+    entries_list3 = selected_entries.filter(entry_id__label_community = 1)
+    entries_list = entries_list1 | entries_list2 | entries_list3
+
+    # Entry content that contains more words than this is truncated.
+    truncate_words = 250
+    items_per_page = 25
+
+    # get the last run time
+    run_time = RunTime.objects.all()[0]
+
+    return render_to_response('main.tmpl', {
+        'entries_list':entries_list,
+        'truncate_words':truncate_words,
+        'items_per_page':repr(items_per_page),
+        'run_time':run_time,
+        #'pag_entries_list':pag_entries_list,
+    })
+
+def member_subscribe(request):
+    if request.method == 'POST':
+        form = ContactForm(request.POST, request.FILES)
+        #return HttpResponse(str(request.FILES))
+        if form.is_valid():
+            human = True
+            check = handle_uploaded_file(request.FILES['hackergotchi'])
+
+            # save the author information
+            f = request.FILES['hackergotchi']
+            if check[0]:
+                # change the name of the file to the unique name created
+                f.name = check[1]
+                author = Authors(author_name=request.POST['name'], author_surname=request.POST['surname'], author_email=request.POST['email'], channel_url=request.POST['feed'], author_face=f.name, is_approved=0, current_status=5)
+            else:
+                author = Authors(author_name=request.POST['name'], author_surname=request.POST['surname'], author_email=request.POST['email'], channel_url=request.POST['feed'], is_approved=0, current_status=5)
+            try:
+                author.save()
+                # save the history with an explanation
+                author.history_set.create(action_type=5, action_date=datetime.datetime.now(), action_explanation=request.POST['message'])
+            except:
+                pass
+            # send mail part
+            # fill it here
+            return render_response(request, 'main/subscribe.html', {'submit': 'done'})
+    else:
+        form = ContactForm()
+    return render_response(request, 'main/subscribe.html', {'form': form})
+
+def handle_uploaded_file(f):
+
+    if not f.name: return (False, '')
+    # create a unique name for the image: timestamp plus the original extension
+    t = str(time.time()).split(".")
+    img_name = t[0] + t[1] + "." + f.name.split(".")[-1]
+    f.name = img_name
+    path = os.path.join(settings.FILE_UPLOAD_TEMP_DIR, f.name)
+
+    destination = open(path, 'wb+')
+    for chunk in f.chunks():
+        destination.write(chunk)
+    destination.close()
+
+    m = magic.open(magic.MAGIC_MIME)
+    m.load()
+    t = m.file(path)
+    if t.split('/')[0] == 'image':
+        return (True, f.name)
+    else:
+        os.unlink(path)
+        return (False, '')
+
+def list_members(request):
+
+    authors = Authors.objects.all()
+
+    return render_response(request, 'main/members.html', {'members': authors})
+
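The pagination pattern that archive() below relies on, runnable with just Django installed (editorial sketch, not part of the patch; any list works as the object list):

from django.core.paginator import Paginator, EmptyPage, InvalidPage

paginator = Paginator(range(100), 25)   # 4 pages of 25
try:
    page = int('oops')                  # e.g. a malformed ?page= parameter
except ValueError:
    page = 1
try:
    current = paginator.page(page)
except (EmptyPage, InvalidPage):
    current = paginator.page(paginator.num_pages)
print current.number, len(current.object_list)   # 1 25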
+ paginator = Paginator(entries_list, elements_in_a_page) + + # Validate the page number; if it is not an int, fall back to the first page. + try: + page = int(request.GET.get('page', '1')) + except ValueError: + page = 1 + + # If the requested page is out of range, return the last page. + try: + p_entries_list = paginator.page(page) + except (EmptyPage, InvalidPage): + p_entries_list = paginator.page(paginator.num_pages) + + return render_to_response('archive.tmpl', { + 'entries_list': entries_list, + 'p_entries_list': p_entries_list, + 'truncate_words': truncate_words, + 'items_per_page': str(items_per_page), + 'run_time': run_time, + 'archive_year': archive_year, + 'archive_month': archive_month, + 'error': error, + }) \ No newline at end of file diff --git a/DJAGEN/tags/djagen_old/djagen/collector/wrappers.py b/DJAGEN/tags/djagen_old/djagen/collector/wrappers.py new file mode 100755 index 0000000..af35741 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/collector/wrappers.py @@ -0,0 +1,13 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +from django.shortcuts import render_to_response +from django.template import RequestContext + +def render_response(req, *args, **kwargs): + """ + Wrapper that automatically adds "context_instance" to render_to_response. + """ + kwargs['context_instance'] = RequestContext(req) + return render_to_response(*args, **kwargs) diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/__init__.py b/DJAGEN/tags/djagen_old/djagen/gezegen/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/00101010.info,konu,teknik,index.rss b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/00101010.info,konu,teknik,index.rss new file mode 100755 index 0000000..9e5873d Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/00101010.info,konu,teknik,index.rss differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/6kere9.com,blag,feed,rss,Genel b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/6kere9.com,blag,feed,rss,Genel new file mode 100755 index 0000000..2e31cfd Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/6kere9.com,blag,feed,rss,Genel differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/ahmet.pardusman.org,blog,feed,cat=2 b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/ahmet.pardusman.org,blog,feed,cat=2 new file mode 100755 index 0000000..0245ab2 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/ahmet.pardusman.org,blog,feed,cat=2 differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/anilozbek.blogspot.com,feeds,posts,default,-,gnu%2Flinux b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/anilozbek.blogspot.com,feeds,posts,default,-,gnu%2Flinux new file mode 100755 index 0000000..358c828 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/anilozbek.blogspot.com,feeds,posts,default,-,gnu%2Flinux differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/armish.linux-sevenler.org,blog,category,gezegen,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/armish.linux-sevenler.org,blog,category,gezegen,feed new file mode 100755 index 0000000..68d63a3 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/armish.linux-sevenler.org,blog,category,gezegen,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/armuting.blogspot.com,feeds,posts,default,-,lkd_gezegen b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/armuting.blogspot.com,feeds,posts,default,-,lkd_gezegen new file mode 100755 index
0000000..46f50d2 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/armuting.blogspot.com,feeds,posts,default,-,lkd_gezegen differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/bilisimlab.com,blog,rss.php b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/bilisimlab.com,blog,rss.php new file mode 100755 index 0000000..679efe6 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/bilisimlab.com,blog,rss.php differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/blog.akgul.web.tr,cat=2&feed=rss2 b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/blog.akgul.web.tr,cat=2&feed=rss2 new file mode 100755 index 0000000..95344fe Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/blog.akgul.web.tr,cat=2&feed=rss2 differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/blog.arsln.org,category,gezegen,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/blog.arsln.org,category,gezegen,feed new file mode 100755 index 0000000..2a8e57a Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/blog.arsln.org,category,gezegen,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/blog.corporem.org,feed=rss2&cat=3 b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/blog.corporem.org,feed=rss2&cat=3 new file mode 100755 index 0000000..438b986 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/blog.corporem.org,feed=rss2&cat=3 differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/blog.gunduz.org,index.php,feeds,categories,1-OEzguer-Yazlm.rss b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/blog.gunduz.org,index.php,feeds,categories,1-OEzguer-Yazlm.rss new file mode 100755 index 0000000..ecde907 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/blog.gunduz.org,index.php,feeds,categories,1-OEzguer-Yazlm.rss differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/blog.halid.org,tag,linux,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/blog.halid.org,tag,linux,feed new file mode 100755 index 0000000..e712984 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/blog.halid.org,tag,linux,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/blog.oguz.biz,category,gezegen,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/blog.oguz.biz,category,gezegen,feed new file mode 100755 index 0000000..b10fd19 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/blog.oguz.biz,category,gezegen,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/blog.oguz.biz,category,gezegen,rss2 b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/blog.oguz.biz,category,gezegen,rss2 new file mode 100755 index 0000000..b10fd19 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/blog.oguz.biz,category,gezegen,rss2 differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/blog.oguz.name.tr,feed=atom&cat=7 b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/blog.oguz.name.tr,feed=atom&cat=7 new file mode 100755 index 0000000..4903276 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/blog.oguz.name.tr,feed=atom&cat=7 differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/blog.ratonred.com,tag,gezegen-linux,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/blog.ratonred.com,tag,gezegen-linux,feed new file mode 100755 index 0000000..2f925ff Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/blog.ratonred.com,tag,gezegen-linux,feed differ diff --git 
a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/blogs.portakalteknoloji.com,bora,blog,feed,rss b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/blogs.portakalteknoloji.com,bora,blog,feed,rss new file mode 100755 index 0000000..2c25618 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/blogs.portakalteknoloji.com,bora,blog,feed,rss differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/canerblt.wordpress.com,tag,linux,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/canerblt.wordpress.com,tag,linux,feed new file mode 100755 index 0000000..46d7a29 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/canerblt.wordpress.com,tag,linux,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/cankavaklioglu.name.tr,guncelgunce,archives,linux,index-rss.xml b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/cankavaklioglu.name.tr,guncelgunce,archives,linux,index-rss.xml new file mode 100755 index 0000000..0350e25 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/cankavaklioglu.name.tr,guncelgunce,archives,linux,index-rss.xml differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/cekirdek.pardus.org.tr,~meren,blog,feed,rss b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/cekirdek.pardus.org.tr,~meren,blog,feed,rss new file mode 100755 index 0000000..ce8e493 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/cekirdek.pardus.org.tr,~meren,blog,feed,rss differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/cemosonmez.blogspot.com,feeds,posts,default,-,gezegen b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/cemosonmez.blogspot.com,feeds,posts,default,-,gezegen new file mode 100755 index 0000000..d36cee4 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/cemosonmez.blogspot.com,feeds,posts,default,-,gezegen differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/devador.blogspot.com,feeds,posts,default,-,linux b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/devador.blogspot.com,feeds,posts,default,-,linux new file mode 100755 index 0000000..dfc75be Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/devador.blogspot.com,feeds,posts,default,-,linux differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/ekin.fisek.com.tr,blog,feed=rss2 b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/ekin.fisek.com.tr,blog,feed=rss2 new file mode 100755 index 0000000..2be0e3b Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/ekin.fisek.com.tr,blog,feed=rss2 differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/ekin.fisek.com.tr,blog,wp-rss2.php,cat=5 b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/ekin.fisek.com.tr,blog,wp-rss2.php,cat=5 new file mode 100755 index 0000000..2be0e3b Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/ekin.fisek.com.tr,blog,wp-rss2.php,cat=5 differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/emrahcom.blogspot.com,feeds,posts,default,-,lkd,alt=rss b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/emrahcom.blogspot.com,feeds,posts,default,-,lkd,alt=rss new file mode 100755 index 0000000..6747bae Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/emrahcom.blogspot.com,feeds,posts,default,-,lkd,alt=rss differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/ercankuru.com.tr,index,category,gezegen,lkd-gezegeni,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/ercankuru.com.tr,index,category,gezegen,lkd-gezegeni,feed new file mode 100755 
index 0000000..74f1508 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/ercankuru.com.tr,index,category,gezegen,lkd-gezegeni,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/eumur.wordpress.com,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/eumur.wordpress.com,feed new file mode 100755 index 0000000..7b9f510 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/eumur.wordpress.com,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/feeds.feedburner.com,SerkanLinuxGezegeni b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/feeds.feedburner.com,SerkanLinuxGezegeni new file mode 100755 index 0000000..9ae56c3 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/feeds.feedburner.com,SerkanLinuxGezegeni differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/feeds.feedburner.com,Syslogs b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/feeds.feedburner.com,Syslogs new file mode 100755 index 0000000..e69e245 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/feeds.feedburner.com,Syslogs differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/feeds.feedburner.com,TheUselessJournalV4 b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/feeds.feedburner.com,TheUselessJournalV4 new file mode 100755 index 0000000..4b119c4 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/feeds.feedburner.com,TheUselessJournalV4 differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/feeds.feedburner.com,canburak-gezegen-linux b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/feeds.feedburner.com,canburak-gezegen-linux new file mode 100755 index 0000000..b17d290 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/feeds.feedburner.com,canburak-gezegen-linux differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/feeds.feedburner.com,ndemirgezegen b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/feeds.feedburner.com,ndemirgezegen new file mode 100755 index 0000000..835a90b Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/feeds.feedburner.com,ndemirgezegen differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/feeds.feedburner.com,nesimia-gezegen,format=xml b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/feeds.feedburner.com,nesimia-gezegen,format=xml new file mode 100755 index 0000000..0f30cb0 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/feeds.feedburner.com,nesimia-gezegen,format=xml differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/feeds.feedburner.com,oguzy-gezegen b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/feeds.feedburner.com,oguzy-gezegen new file mode 100755 index 0000000..9665280 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/feeds.feedburner.com,oguzy-gezegen differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/feeds.feedburner.com,pinguar-gezegen b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/feeds.feedburner.com,pinguar-gezegen new file mode 100755 index 0000000..9415169 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/feeds.feedburner.com,pinguar-gezegen differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/feeds.feedburner.com,tayfurtaybua b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/feeds.feedburner.com,tayfurtaybua new file mode 100755 index 0000000..3a880d9 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/feeds.feedburner.com,tayfurtaybua differ diff --git 
a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/feeds2.feedburner.com,ekovanci,format=xml b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/feeds2.feedburner.com,ekovanci,format=xml new file mode 100755 index 0000000..9b993aa Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/feeds2.feedburner.com,ekovanci,format=xml differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/feeds2.feedburner.com,raptiye_linux_gezegeni b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/feeds2.feedburner.com,raptiye_linux_gezegeni new file mode 100755 index 0000000..07c349f Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/feeds2.feedburner.com,raptiye_linux_gezegeni differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/flyeater.wordpress.com,tag,lkd,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/flyeater.wordpress.com,tag,lkd,feed new file mode 100755 index 0000000..d9c6178 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/flyeater.wordpress.com,tag,lkd,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gnome.org.tr,index.php,option=com_rss&feed=RSS2.0&no_html=1) b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gnome.org.tr,index.php,option=com_rss&feed=RSS2.0&no_html=1) new file mode 100755 index 0000000..a328c2a Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gnome.org.tr,index.php,option=com_rss&feed=RSS2.0&no_html=1) differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gokdenix.blogspot.com,feeds,posts,default,-,gezegen b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gokdenix.blogspot.com,feeds,posts,default,-,gezegen new file mode 100755 index 0000000..abdcb8b Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gokdenix.blogspot.com,feeds,posts,default,-,gezegen differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gungorbasa.blogspot.com,feeds,posts,default,-,Gezegen b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gungorbasa.blogspot.com,feeds,posts,default,-,Gezegen new file mode 100755 index 0000000..fd229fa Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gungorbasa.blogspot.com,feeds,posts,default,-,Gezegen differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,category,ftp,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,category,ftp,feed new file mode 100755 index 0000000..8b0e7b0 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,category,ftp,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,category,gezegen,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,category,gezegen,feed new file mode 100755 index 0000000..4e0e58f Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,category,gezegen,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,category,seminer,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,category,seminer,feed new file mode 100755 index 0000000..5fd7caa Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,category,seminer,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,category,senlik,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,category,senlik,feed new file mode 100755 index 0000000..fd8a6b7 Binary files /dev/null and 
b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,category,senlik,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,category,sponsor,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,category,sponsor,feed new file mode 100755 index 0000000..63fa89d Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,category,sponsor,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,category,web,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,category,web,feed new file mode 100755 index 0000000..a0548d9 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,category,web,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,category,yk,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,category,yk,feed new file mode 100755 index 0000000..7551f47 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,category,yk,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,ftp,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,ftp,feed new file mode 100755 index 0000000..8b0e7b0 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,ftp,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,gezegen,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,gezegen,feed new file mode 100755 index 0000000..4e0e58f Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,gezegen,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,seminer,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,seminer,feed new file mode 100755 index 0000000..5fd7caa Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,seminer,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,senlik,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,senlik,feed new file mode 100755 index 0000000..fd8a6b7 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,senlik,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,sponsor,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,sponsor,feed new file mode 100755 index 0000000..63fa89d Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,sponsor,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,web,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,web,feed new file mode 100755 index 0000000..a0548d9 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,web,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,yk,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,yk,feed new file mode 100755 index 0000000..7551f47 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gunluk.lkd.org.tr,yk,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gurcanozturk.com,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gurcanozturk.com,feed new file mode 100755 index 0000000..9aee4a6 Binary files /dev/null and 
b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/gurcanozturk.com,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/handlet.blogspot.com,feeds,posts,default,alt=rss b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/handlet.blogspot.com,feeds,posts,default,alt=rss new file mode 100755 index 0000000..e71e5d0 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/handlet.blogspot.com,feeds,posts,default,alt=rss differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/ilkinbalkanay.blogspot.com,feeds,posts,default,-,Gezegen b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/ilkinbalkanay.blogspot.com,feeds,posts,default,-,Gezegen new file mode 100755 index 0000000..3c229d2 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/ilkinbalkanay.blogspot.com,feeds,posts,default,-,Gezegen differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/kapadokyayazilim.com,gunluk,omerakyuz,category,linux,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/kapadokyayazilim.com,gunluk,omerakyuz,category,linux,feed new file mode 100755 index 0000000..8df8a2b Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/kapadokyayazilim.com,gunluk,omerakyuz,category,linux,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/kivi.com.tr,blog,feed=rss2 b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/kivi.com.tr,blog,feed=rss2 new file mode 100755 index 0000000..3cc5951 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/kivi.com.tr,blog,feed=rss2 differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/kubilaykocabalkan.wordpress.com,tag,pardus,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/kubilaykocabalkan.wordpress.com,tag,pardus,feed new file mode 100755 index 0000000..2fd0c24 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/kubilaykocabalkan.wordpress.com,tag,pardus,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/leoman.gen.tr,Gezegen,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/leoman.gen.tr,Gezegen,feed new file mode 100755 index 0000000..47ea692 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/leoman.gen.tr,Gezegen,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/linuxogrenmekistiyorum.com,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/linuxogrenmekistiyorum.com,feed new file mode 100755 index 0000000..891c3c1 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/linuxogrenmekistiyorum.com,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/marenostrum.blogsome.com,category,gezegen,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/marenostrum.blogsome.com,category,gezegen,feed new file mode 100755 index 0000000..c37d73f Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/marenostrum.blogsome.com,category,gezegen,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/mhazer.blogspot.com,feeds,posts,default,-,gezegen b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/mhazer.blogspot.com,feeds,posts,default,-,gezegen new file mode 100755 index 0000000..f517300 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/mhazer.blogspot.com,feeds,posts,default,-,gezegen differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/mmakbas.wordpress.com,category,gezegen,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/mmakbas.wordpress.com,category,gezegen,feed new file mode 100755 index 0000000..d2b73f6 Binary files 
/dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/mmakbas.wordpress.com,category,gezegen,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/mmakbas.wordpress.com,tag,gezegen,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/mmakbas.wordpress.com,tag,gezegen,feed new file mode 100755 index 0000000..d2b73f6 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/mmakbas.wordpress.com,tag,gezegen,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/murattikil.blogspot.com,feeds,posts,default b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/murattikil.blogspot.com,feeds,posts,default new file mode 100755 index 0000000..89f397d Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/murattikil.blogspot.com,feeds,posts,default differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/nightwalkers.blogspot.com,atom.xml b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/nightwalkers.blogspot.com,atom.xml new file mode 100755 index 0000000..a0c178c Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/nightwalkers.blogspot.com,atom.xml differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/nyucel.blogspot.com,feeds,posts,default,-,gezegen b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/nyucel.blogspot.com,feeds,posts,default,-,gezegen new file mode 100755 index 0000000..16c12c8 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/nyucel.blogspot.com,feeds,posts,default,-,gezegen differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/onuraslan.com,blog,etiket,gezegen,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/onuraslan.com,blog,etiket,gezegen,feed new file mode 100755 index 0000000..022ac92 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/onuraslan.com,blog,etiket,gezegen,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/osjunkies.com,blog,author,findik,feed,rss b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/osjunkies.com,blog,author,findik,feed,rss new file mode 100755 index 0000000..ca60177 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/osjunkies.com,blog,author,findik,feed,rss differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/ozgurmurat.blogspot.com,feeds,posts,default,-,lkd_gezegen b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/ozgurmurat.blogspot.com,feeds,posts,default,-,lkd_gezegen new file mode 100755 index 0000000..d7edd3c Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/ozgurmurat.blogspot.com,feeds,posts,default,-,lkd_gezegen differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/ozguryazilim.com,feed=rss2 b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/ozguryazilim.com,feed=rss2 new file mode 100755 index 0000000..cbeb6fc Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/ozguryazilim.com,feed=rss2 differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/panhaema.com,rss.php,mcat=linux b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/panhaema.com,rss.php,mcat=linux new file mode 100755 index 0000000..81100c9 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/panhaema.com,rss.php,mcat=linux differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/pardusever.blogspot.com,feeds,posts,default,-,gezegen b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/pardusever.blogspot.com,feeds,posts,default,-,gezegen new file mode 100755 index 0000000..2139de7 Binary files /dev/null and 
b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/pardusever.blogspot.com,feeds,posts,default,-,gezegen differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/sehitoglu.web.tr,gunluk,feed=rss2&cat=12 b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/sehitoglu.web.tr,gunluk,feed=rss2&cat=12 new file mode 100755 index 0000000..e297d6f Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/sehitoglu.web.tr,gunluk,feed=rss2&cat=12 differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/seridarus.blogspot.com,feeds,posts,default,-,gezegen b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/seridarus.blogspot.com,feeds,posts,default,-,gezegen new file mode 100755 index 0000000..7f48364 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/seridarus.blogspot.com,feeds,posts,default,-,gezegen differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/serkan.feyvi.org,blog,category,debian,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/serkan.feyvi.org,blog,category,debian,feed new file mode 100755 index 0000000..54a01ef Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/serkan.feyvi.org,blog,category,debian,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/serkank.wordpress.com,category,linux,feed,atom b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/serkank.wordpress.com,category,linux,feed,atom new file mode 100755 index 0000000..aa34b73 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/serkank.wordpress.com,category,linux,feed,atom differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/serveracim.blogspot.com,feeds,posts,default,alt=rss b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/serveracim.blogspot.com,feeds,posts,default,alt=rss new file mode 100755 index 0000000..d8b1c1d Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/serveracim.blogspot.com,feeds,posts,default,alt=rss differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/siyahsapka.blogspot.com,feeds,posts,default,-,Gezegen,alt=rss b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/siyahsapka.blogspot.com,feeds,posts,default,-,Gezegen,alt=rss new file mode 100755 index 0000000..6c98d0a Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/siyahsapka.blogspot.com,feeds,posts,default,-,Gezegen,alt=rss differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/talat.uyarer.com,feed=rss2 b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/talat.uyarer.com,feed=rss2 new file mode 100755 index 0000000..448d465 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/talat.uyarer.com,feed=rss2 differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/tonguc.name,blog,flav=atom b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/tonguc.name,blog,flav=atom new file mode 100755 index 0000000..1607484 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/tonguc.name,blog,flav=atom differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/twitter.com,statuses,user_timeline,23496360.rss b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/twitter.com,statuses,user_timeline,23496360.rss new file mode 100755 index 0000000..80c0a22 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/twitter.com,statuses,user_timeline,23496360.rss differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/web.inonu.edu.tr,~mkarakaplan,blog,wp-rss2.php b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/web.inonu.edu.tr,~mkarakaplan,blog,wp-rss2.php new 
file mode 100755 index 0000000..5e19036 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/web.inonu.edu.tr,~mkarakaplan,blog,wp-rss2.php differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.ademalpyildiz.com.tr,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.ademalpyildiz.com.tr,feed new file mode 100755 index 0000000..08321da Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.ademalpyildiz.com.tr,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.alpersomuncu.com,weblog,index.php,feeds,categories,8-Linux.rss b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.alpersomuncu.com,weblog,index.php,feeds,categories,8-Linux.rss new file mode 100755 index 0000000..daf3e82 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.alpersomuncu.com,weblog,index.php,feeds,categories,8-Linux.rss differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.amerikadabirgun.com,category,turkce,linux,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.amerikadabirgun.com,category,turkce,linux,feed new file mode 100755 index 0000000..fffe307 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.amerikadabirgun.com,category,turkce,linux,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.amerikadabirgun.com,tag,turkce,linux,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.amerikadabirgun.com,tag,turkce,linux,feed new file mode 100755 index 0000000..fffe307 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.amerikadabirgun.com,tag,turkce,linux,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.ayder.org,gunluk,feed=rss2 b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.ayder.org,gunluk,feed=rss2 new file mode 100755 index 0000000..e1d00b0 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.ayder.org,gunluk,feed=rss2 differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.bahri.info,category,linux,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.bahri.info,category,linux,feed new file mode 100755 index 0000000..5f14d82 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.bahri.info,category,linux,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.bayramkaragoz.org,category,gezegen,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.bayramkaragoz.org,category,gezegen,feed new file mode 100755 index 0000000..b4229ce Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.bayramkaragoz.org,category,gezegen,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.birazkisisel.com,tag,linux-gezegeni,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.birazkisisel.com,tag,linux-gezegeni,feed new file mode 100755 index 0000000..c27094f Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.birazkisisel.com,tag,linux-gezegeni,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.blockdiagram.net,blog,rss.xml b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.blockdiagram.net,blog,rss.xml new file mode 100755 index 0000000..ec654a2 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.blockdiagram.net,blog,rss.xml differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.bugunlinux.com,feed=rss2 b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.bugunlinux.com,feed=rss2 new file mode 100755 index 
0000000..e130f58 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.bugunlinux.com,feed=rss2 differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.burakdayioglu.net,category,linux,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.burakdayioglu.net,category,linux,feed new file mode 100755 index 0000000..769d2e6 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.burakdayioglu.net,category,linux,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.efeciftci.com,category,gezegen,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.efeciftci.com,category,gezegen,feed new file mode 100755 index 0000000..f002848 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.efeciftci.com,category,gezegen,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.erdinc.info,cat=6&feed=rss2 b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.erdinc.info,cat=6&feed=rss2 new file mode 100755 index 0000000..1254119 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.erdinc.info,cat=6&feed=rss2 differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.erhanekici.com,blog,category,gezegen,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.erhanekici.com,blog,category,gezegen,feed new file mode 100755 index 0000000..9c3bdad Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.erhanekici.com,blog,category,gezegen,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.faikuygur.com,blog,feed,cat=-4 b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.faikuygur.com,blog,feed,cat=-4 new file mode 100755 index 0000000..03d996a Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.faikuygur.com,blog,feed,cat=-4 differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.furkancaliskan.com,blog,category,gezegen,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.furkancaliskan.com,blog,category,gezegen,feed new file mode 100755 index 0000000..9c0f86a Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.furkancaliskan.com,blog,category,gezegen,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.gokmengorgen.net,gunluk,index.php,category_name=oi&feed=rss2 b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.gokmengorgen.net,gunluk,index.php,category_name=oi&feed=rss2 new file mode 100755 index 0000000..cbfcfee Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.gokmengorgen.net,gunluk,index.php,category_name=oi&feed=rss2 differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.hakanuygun.com,blog,feed=atom&cat=13 b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.hakanuygun.com,blog,feed=atom&cat=13 new file mode 100755 index 0000000..ab7fb7e Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.hakanuygun.com,blog,feed=atom&cat=13 differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.heartsmagic.net,category,linux,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.heartsmagic.net,category,linux,feed new file mode 100755 index 0000000..c626680 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.heartsmagic.net,category,linux,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.kirmizivesiyah.org,index.php,category,gezegen,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.kirmizivesiyah.org,index.php,category,gezegen,feed 
new file mode 100755 index 0000000..c4c0c29 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.kirmizivesiyah.org,index.php,category,gezegen,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.koray.org,blog,wp-rss2.php,cat=7 b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.koray.org,blog,wp-rss2.php,cat=7 new file mode 100755 index 0000000..85ff9bb Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.koray.org,blog,wp-rss2.php,cat=7 differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.linuxipuclari.com,category,gezegen,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.linuxipuclari.com,category,gezegen,feed new file mode 100755 index 0000000..860e114 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.linuxipuclari.com,category,gezegen,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.lkd.org.tr,news,aggregator,RSS b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.lkd.org.tr,news,aggregator,RSS new file mode 100755 index 0000000..cae373b Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.lkd.org.tr,news,aggregator,RSS differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.murekkep.org,konu,acik-kaynak-ve-linux,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.murekkep.org,konu,acik-kaynak-ve-linux,feed new file mode 100755 index 0000000..ce3e49a Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.murekkep.org,konu,acik-kaynak-ve-linux,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.okanakyuz.com,feed=rss2&cat=17 b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.okanakyuz.com,feed=rss2&cat=17 new file mode 100755 index 0000000..0530fc6 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.okanakyuz.com,feed=rss2&cat=17 differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.ozgurkuru.net,ozgur,category,linuxgezegen,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.ozgurkuru.net,ozgur,category,linuxgezegen,feed new file mode 100755 index 0000000..17b2aa5 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.ozgurkuru.net,ozgur,category,linuxgezegen,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.pardus-linux.org,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.pardus-linux.org,feed new file mode 100755 index 0000000..26508a4 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.pardus-linux.org,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.python-tr.com,feed,atom b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.python-tr.com,feed,atom new file mode 100755 index 0000000..6fa6381 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.python-tr.com,feed,atom differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.r-3.org,blog,cat=4&feed=rss2 b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.r-3.org,blog,cat=4&feed=rss2 new file mode 100755 index 0000000..79ac10c Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.r-3.org,blog,cat=4&feed=rss2 differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.samkon.org,feed=rss2&cat=778 b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.samkon.org,feed=rss2&cat=778 new file mode 100755 index 0000000..3be46de Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.samkon.org,feed=rss2&cat=778 
differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.sinanonur.com,konu,linuxgezegen,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.sinanonur.com,konu,linuxgezegen,feed new file mode 100755 index 0000000..9e9e0d4 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.sinanonur.com,konu,linuxgezegen,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.soyoz.com,gunce,etiket,linux-gezegeni,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.soyoz.com,gunce,etiket,linux-gezegeni,feed new file mode 100755 index 0000000..08416f5 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.soyoz.com,gunce,etiket,linux-gezegeni,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.syslogs.org,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.syslogs.org,feed new file mode 100755 index 0000000..e69e245 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.syslogs.org,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.teknozat.com,kategori,linux,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.teknozat.com,kategori,linux,feed new file mode 100755 index 0000000..cebe5a6 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.teknozat.com,kategori,linux,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.tuxworkshop.com,blog,cat=8 b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.tuxworkshop.com,blog,cat=8 new file mode 100755 index 0000000..435886f Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.tuxworkshop.com,blog,cat=8 differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.yalazi.org,index.php,archives,category,gezegen,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.yalazi.org,index.php,archives,category,gezegen,feed new file mode 100755 index 0000000..fc7f398 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/www.yalazi.org,index.php,archives,category,gezegen,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/yildirim.isadamlari.org,tag,gezegen,feed b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/yildirim.isadamlari.org,tag,gezegen,feed new file mode 100755 index 0000000..497d840 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/yildirim.isadamlari.org,tag,gezegen,feed differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/zembereknlp.blogspot.com,feeds,posts,default,alt=rss b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/zembereknlp.blogspot.com,feeds,posts,default,alt=rss new file mode 100755 index 0000000..8d68d23 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/zembereknlp.blogspot.com,feeds,posts,default,alt=rss differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/cache/zzz.fisek.com.tr,seyir-defteri,feed=rss2&cat=3 b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/zzz.fisek.com.tr,seyir-defteri,feed=rss2&cat=3 new file mode 100755 index 0000000..1273f46 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/cache/zzz.fisek.com.tr,seyir-defteri,feed=rss2&cat=3 differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/__init__.py b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/atom.xml.tmpl b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/atom.xml.tmpl new file mode 100755 index 0000000..c444d01 --- /dev/null +++ 
b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/atom.xml.tmpl @@ -0,0 +1,61 @@ + + + + <TMPL_VAR name> + "/> + "/> + + + + + + xml:lang=""> + xml:lang="<TMPL_VAR title_language>"</TMPL_IF>><TMPL_VAR title ESCAPE="HTML"> + "/> + + + xml:lang=""> + + + + + + + + + + + + + + + + + + + + + <TMPL_VAR channel_title ESCAPE="HTML"> + + <TMPL_VAR channel_name ESCAPE="HTML"> + + + + + "/> + + + + + + + + + + + + + + + + diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/atom.xml.tmplc b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/atom.xml.tmplc new file mode 100755 index 0000000..4939e63 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/atom.xml.tmplc differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/config.ini b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/config.ini new file mode 100755 index 0000000..c54fd3b --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/config.ini @@ -0,0 +1,42 @@ +[Planet] +name = Linux Gezegeni +link = http://gezegen.linux.org.tr +owner_name = Gezegen Ekibi +owner_email = gezegen@linux.org.tr +cache_directory = cache +new_feed_items = 1 +log_level = DEBUG +template_files = gezegen/index.html.tmpl gezegen/rss20.xml.tmpl gezegen/rss10.xml.tmpl gezegen/opml.xml.tmpl gezegen/foafroll.xml.tmpl gezegen/sidebar.html.tmpl gezegen/simple.html.tmpl gezegen/feeds.html.tmpl gezegen/atom.xml.tmpl +output_dir = www/ +# items_per_page = 15 +items_per_page = 25 +#days_per_page = 0 +feed_timeout = 20 + +# future_dates = ignore_date +# ignore_in_feed = updated + +encoding = utf-8 +locale = tr_TR.UTF-8 + +date_format = %d %b %Y @ %I:%M %p +#date_format = %B %d, %Y %I:%M %p +new_date_format = %d %B %Y + +[DEFAULT] +facewidth = 64 +faceheight = 64 + + +[http://www.hakanuygun.com/blog/?feed=atom&cat=13] +name = Hakan Uygun +nick = huygun +label = Personal +id = 1 + +[http://feeds.feedburner.com/oguzy-gezegen] +name = Oğuz Yarımtepe +face = oguzyarimtepe.png +nick = oyarimtepe +label = Personal +id = 2 diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/config_entries.xml b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/config_entries.xml new file mode 100755 index 0000000..f9848a4 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/config_entries.xml @@ -0,0 +1,17 @@ + + + [http://www.bugunlinux.com/?feed=rss2] + Ahmet Yıldız + ayildiz + + 1 + + + + [http://www.bugunlinux.com/?feed=rss3] + Ahmet Yıldızz + ayildizz + + 2 + + diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/config_header.xml b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/config_header.xml new file mode 100755 index 0000000..949e8cf --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/config_header.xml @@ -0,0 +1,28 @@ + +
+ [Planet] + Linux Gezegeni + http://gezegen.linux.org.tr + Gezegen Ekibi + gezegen@linux.org.tr + cache + 1 + DEBUG + gezegen/index.html.tmpl gezegen/rss20.xml.tmpl gezegen/rss10.xml.tmpl gezegen/opml.xml.tmpl gezegen/foafroll.xml.tmpl gezegen/sidebar.html.tmpl gezegen/simple.html.tmpl gezegen/feeds.html.tmpl gezegen/atom.xml.tmpl + www/ + 25 + 20 + + utf-8 + tr_TR.UTF-8 + + %d %b %Y @ %I:%M %p + new_date_format = %d %B %Y +
+ +
+ [DEFAULT] + 64 + 64 +
+
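The config.ini added above is the canonical form of these settings: a [Planet] section with global options, a [DEFAULT] section whose facewidth/faceheight values propagate into every other section, and one section per subscribed feed, named by its URL. A minimal sketch (Python 2, to match the codebase; the file path is an assumption) of listing the subscriptions with the standard ConfigParser module, the same parser planet.py uses further below:

import ConfigParser

config = ConfigParser.ConfigParser()
config.read("gezegen/config.ini")

for section in config.sections():
    if section == "Planet":
        continue  # every other section is a feed, named by its URL
    # name/nick/label/id come from the feed section itself;
    # facewidth/faceheight fall through from [DEFAULT].
    print "%s (%s) <- %s" % (config.get(section, "name"),
                             config.get(section, "nick"),
                             section)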
diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/feeds.html.tmpl b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/feeds.html.tmpl new file mode 100755 index 0000000..acd9479 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/feeds.html.tmpl @@ -0,0 +1,22 @@ +
+ +
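The *.tmpl files registered in template_files are htmltmpl templates: when the planet script runs, it fills the <TMPL_VAR ...> slots (name, link, channel_title, ...) and loops over the aggregated entries to produce www/index.html and the feed files. A rough sketch of that mechanism, assuming the htmltmpl module bundled with planet and hypothetical variable values:

from planet import htmltmpl

# Compile the template once, then feed it a set of variables.
manager = htmltmpl.TemplateManager()
template = manager.prepare("gezegen/feeds.html.tmpl")

tproc = htmltmpl.TemplateProcessor()
tproc.set("name", "Linux Gezegeni")              # fills <TMPL_VAR name>
tproc.set("link", "http://gezegen.linux.org.tr")
print tproc.process(template)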
diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/feeds.html.tmplc b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/feeds.html.tmplc new file mode 100755 index 0000000..155f4e4 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/feeds.html.tmplc differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/foafroll.xml.tmpl b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/foafroll.xml.tmpl new file mode 100755 index 0000000..f344738 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/foafroll.xml.tmpl @@ -0,0 +1,31 @@ + + + + + + " /> + + + + + + + "> + + + " /> + + + + + + + + + diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/foafroll.xml.tmplc b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/foafroll.xml.tmplc new file mode 100755 index 0000000..d85d57a Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/foafroll.xml.tmplc differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/index.html.tmpl b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/index.html.tmpl new file mode 100755 index 0000000..7726f6b --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/index.html.tmpl @@ -0,0 +1,356 @@ + + + + <TMPL_VAR name> + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/index.html.tmplc b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/index.html.tmplc new file mode 100755 index 0000000..259931d Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/index.html.tmplc differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/opml.xml.tmpl b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/opml.xml.tmpl new file mode 100755 index 0000000..50bbabe --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/opml.xml.tmpl @@ -0,0 +1,16 @@ + + + + <TMPL_VAR name> + + + + + + + + + " xmlUrl=""/> + + + diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/opml.xml.tmplc b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/opml.xml.tmplc new file mode 100755 index 0000000..f9309f9 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/opml.xml.tmplc differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/rss10.xml.tmpl b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/rss10.xml.tmpl new file mode 100755 index 0000000..0cd709b --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/rss10.xml.tmpl @@ -0,0 +1,37 @@ + + +"> + <TMPL_VAR name> + + - + + + + + " /> + + + + + + +"> + <TMPL_VAR channel_name><TMPL_IF title>: <TMPL_VAR title></TMPL_IF> + + + + + + + + + + + + diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/rss10.xml.tmplc b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/rss10.xml.tmplc new file mode 100755 index 0000000..18444f3 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/rss10.xml.tmplc differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/rss20.xml.tmpl b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/rss20.xml.tmpl new file mode 100755 index 0000000..3ff7a11 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/rss20.xml.tmpl @@ -0,0 +1,30 @@ + + + + + <TMPL_VAR name> + + en + - + + + + <TMPL_VAR channel_name><TMPL_IF title>: <TMPL_VAR title></TMPL_IF> + + + + + + " align="right" width="" height="">]]> + + + + + + + + + + + + diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/rss20.xml.tmplc b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/rss20.xml.tmplc new file mode 100755 index 0000000..21f007a Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/rss20.xml.tmplc differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/sidebar.html.tmpl b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/sidebar.html.tmpl new file mode 100755 index 0000000..acfdf4c --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/sidebar.html.tmpl @@ -0,0 +1,17 @@ + diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/sidebar.html.tmplc b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/sidebar.html.tmplc new file mode 100755 index 0000000..50754dd Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/sidebar.html.tmplc differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/simple.html.tmpl b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/simple.html.tmpl new file mode 100755 index 0000000..2c20c6a --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/simple.html.tmpl @@ -0,0 +1,74 @@ + + + + + + + <TMPL_VAR name> + + + + + + + + + + + + + + + +

+ + + + + + + + + diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/simple.html.tmplc b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/simple.html.tmplc new file mode 100755 index 0000000..d466e42 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/simple.html.tmplc differ diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/zaman.sh b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/zaman.sh new file mode 100755 index 0000000..e0c9a2b --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/gezegen/gezegen/zaman.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +while read x +do + echo "$(date)::$x" +done diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/planet-cache.py b/DJAGEN/tags/djagen_old/djagen/gezegen/planet-cache.py new file mode 100755 index 0000000..9334583 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/gezegen/planet-cache.py @@ -0,0 +1,194 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 -*- +"""Planet cache tool. + +""" + +__authors__ = [ "Scott James Remnant ", + "Jeff Waugh " ] +__license__ = "Python" + + +import os +import sys +import time +import dbhash +import ConfigParser + +import planet + + +def usage(): + print "Usage: planet-cache [options] CACHEFILE [ITEMID]..." + print + print "Examine and modify information in the Planet cache." + print + print "Channel Commands:" + print " -C, --channel Display known information on the channel" + print " -L, --list List items in the channel" + print " -K, --keys List all keys found in channel items" + print + print "Item Commands (need ITEMID):" + print " -I, --item Display known information about the item(s)" + print " -H, --hide Mark the item(s) as hidden" + print " -U, --unhide Mark the item(s) as not hidden" + print + print "Other Options:" + print " -h, --help Display this help message and exit" + sys.exit(0) + +def usage_error(msg, *args): + print >>sys.stderr, msg, " ".join(args) + print >>sys.stderr, "Perhaps you need --help ?" + sys.exit(1) + +def print_keys(item, title): + keys = item.keys() + keys.sort() + key_len = max([ len(k) for k in keys ]) + + print title + ":" + for key in keys: + if item.key_type(key) == item.DATE: + value = time.strftime(planet.TIMEFMT_ISO, item[key]) + else: + value = str(item[key]) + print " %-*s %s" % (key_len, key, fit_str(value, 74 - key_len)) + +def fit_str(string, length): + if len(string) <= length: + return string + else: + return string[:length-4] + " ..." 
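+ # For example (illustrative values): fit_str("a very long title", 10) -> "a very ...", while strings that already fit within the length are returned unchanged.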
+ + +if __name__ == "__main__": + cache_file = None + want_ids = 0 + ids = [] + + command = None + + for arg in sys.argv[1:]: + if arg == "-h" or arg == "--help": + usage() + elif arg == "-C" or arg == "--channel": + if command is not None: + usage_error("Only one command option may be supplied") + command = "channel" + elif arg == "-L" or arg == "--list": + if command is not None: + usage_error("Only one command option may be supplied") + command = "list" + elif arg == "-K" or arg == "--keys": + if command is not None: + usage_error("Only one command option may be supplied") + command = "keys" + elif arg == "-I" or arg == "--item": + if command is not None: + usage_error("Only one command option may be supplied") + command = "item" + want_ids = 1 + elif arg == "-H" or arg == "--hide": + if command is not None: + usage_error("Only one command option may be supplied") + command = "hide" + want_ids = 1 + elif arg == "-U" or arg == "--unhide": + if command is not None: + usage_error("Only one command option may be supplied") + command = "unhide" + want_ids = 1 + elif arg.startswith("-"): + usage_error("Unknown option:", arg) + else: + if cache_file is None: + cache_file = arg + elif want_ids: + ids.append(arg) + else: + usage_error("Unexpected extra argument:", arg) + + if cache_file is None: + usage_error("Missing expected cache filename") + elif want_ids and not len(ids): + usage_error("Missing expected entry ids") + + # Open the cache file directly to get the URL it represents + try: + db = dbhash.open(cache_file) + url = db["url"] + db.close() + except dbhash.bsddb._db.DBError, e: + print >>sys.stderr, cache_file + ":", e.args[1] + sys.exit(1) + except KeyError: + print >>sys.stderr, cache_file + ": Probably not a cache file" + sys.exit(1) + + # Now do it the right way :-) + my_planet = planet.Planet(ConfigParser.ConfigParser()) + my_planet.cache_directory = os.path.dirname(cache_file) + channel = planet.Channel(my_planet, url) + + for item_id in ids: + if not channel.has_item(item_id): + print >>sys.stderr, item_id + ": Not in channel" + sys.exit(1) + + # Do the user's bidding + if command == "channel": + print_keys(channel, "Channel Keys") + + elif command == "item": + for item_id in ids: + item = channel.get_item(item_id) + print_keys(item, "Item Keys for %s" % item_id) + + elif command == "list": + print "Items in Channel:" + for item in channel.items(hidden=1, sorted=1): + print " " + item.id + print " " + time.strftime(planet.TIMEFMT_ISO, item.date) + if hasattr(item, "title"): + print " " + fit_str(item.title, 70) + if hasattr(item, "hidden"): + print " (hidden)" + + elif command == "keys": + keys = {} + for item in channel.items(): + for key in item.keys(): + keys[key] = 1 + + keys = keys.keys() + keys.sort() + + print "Keys used in Channel:" + for key in keys: + print " " + key + print + + print "Use --item to output values of particular items." + + elif command == "hide": + for item_id in ids: + item = channel.get_item(item_id) + if hasattr(item, "hidden"): + print item_id + ": Already hidden." + else: + item.hidden = "yes" + + channel.cache_write() + print "Done." + + elif command == "unhide": + for item_id in ids: + item = channel.get_item(item_id) + if hasattr(item, "hidden"): + del(item.hidden) + else: + print item_id + ": Not hidden." + + channel.cache_write() + print "Done." 
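Each file under gezegen/cache/ listed earlier is one of these dbhash databases: one record per feed entry plus a "url" key naming the feed it mirrors. A minimal sketch of inspecting one directly, mirroring what the tool above does (the cache path is assumed to be relative to the gezegen directory):

import dbhash

# Command-line equivalent: python planet-cache.py -C cache/www.syslogs.org,feed
db = dbhash.open("cache/www.syslogs.org,feed")
print "feed url:", db["url"]
print "keys    :", len(db.keys())  # entry records plus channel metadata
db.close()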
diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/planet.py b/DJAGEN/tags/djagen_old/djagen/gezegen/planet.py new file mode 100755 index 0000000..85f7299 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/gezegen/planet.py @@ -0,0 +1,279 @@ +#!/usr/bin/env python +"""The Planet aggregator. + +A flexible and easy-to-use aggregator for generating websites. + +Visit http://www.planetplanet.org/ for more information and to download +the latest version. + +Requires Python 2.1, recommends 2.3. +""" + +__authors__ = [ "Scott James Remnant ", + "Jeff Waugh " ] +__license__ = "Python" + +import datetime + +import os +import sys +import time +import locale +import urlparse + +import planet + +from ConfigParser import ConfigParser + +# Default configuration file path +CONFIG_FILE = "config.ini" + +# Defaults for the [Planet] config section +PLANET_NAME = "Unconfigured Planet" +PLANET_LINK = "Unconfigured Planet" +PLANET_FEED = None +OWNER_NAME = "Anonymous Coward" +OWNER_EMAIL = "" +LOG_LEVEL = "WARNING" +FEED_TIMEOUT = 20 # seconds + +# Default template file list +TEMPLATE_FILES = "examples/basic/planet.html.tmpl" + +#part for django api usage +import sys +import os +# In order to reduce integration issues, this path gets defined automatically. +sys.path.append(os.path.abspath('../..')) + +os.environ['DJANGO_SETTINGS_MODULE'] = 'djagen.settings' +from djagen.collector.models import * + +def config_get(config, section, option, default=None, raw=0, vars=None): + """Get a value from the configuration, with a default.""" + if config.has_option(section, option): + return config.get(section, option, raw=raw, vars=None) + else: + return default + +def main(): + config_file = CONFIG_FILE + offline = 0 + verbose = 0 + + for arg in sys.argv[1:]: + if arg == "-h" or arg == "--help": + print "Usage: planet [options] [CONFIGFILE]" + print + print "Options:" + print " -v, --verbose DEBUG level logging during update" + print " -o, --offline Update the Planet from the cache only" + print " -h, --help Display this help message and exit" + print + sys.exit(0) + elif arg == "-v" or arg == "--verbose": + verbose = 1 + elif arg == "-o" or arg == "--offline": + offline = 1 + elif arg.startswith("-"): + print >>sys.stderr, "Unknown option:", arg + sys.exit(1) + else: + config_file = arg + + # Read the configuration file + config = ConfigParser() + config.read(config_file) + if not config.has_section("Planet"): + print >>sys.stderr, "Configuration missing [Planet] section." 
+ sys.exit(1) + + # Read the [Planet] config section + planet_name = config_get(config, "Planet", "name", PLANET_NAME) + planet_link = config_get(config, "Planet", "link", PLANET_LINK) + planet_feed = config_get(config, "Planet", "feed", PLANET_FEED) + owner_name = config_get(config, "Planet", "owner_name", OWNER_NAME) + owner_email = config_get(config, "Planet", "owner_email", OWNER_EMAIL) + if verbose: + log_level = "DEBUG" + else: + log_level = config_get(config, "Planet", "log_level", LOG_LEVEL) + feed_timeout = config_get(config, "Planet", "feed_timeout", FEED_TIMEOUT) + template_files = config_get(config, "Planet", "template_files", + TEMPLATE_FILES).split(" ") + + # Default feed to the first feed for which there is a template + if not planet_feed: + for template_file in template_files: + name = os.path.splitext(os.path.basename(template_file))[0] + if name.find('atom')>=0 or name.find('rss')>=0: + planet_feed = urlparse.urljoin(planet_link, name) + break + + # Define locale + if config.has_option("Planet", "locale"): + # The user can specify more than one locale (separated by ":") as + # fallbacks. + locale_ok = False + for user_locale in config.get("Planet", "locale").split(':'): + user_locale = user_locale.strip() + try: + locale.setlocale(locale.LC_ALL, user_locale) + except locale.Error: + pass + else: + locale_ok = True + break + if not locale_ok: + print >>sys.stderr, "Unsupported locale setting." + sys.exit(1) + + # Activate logging + planet.logging.basicConfig() + planet.logging.getLogger().setLevel(planet.logging.getLevelName(log_level)) + log = planet.logging.getLogger("planet.runner") + try: + log.warning + except: + log.warning = log.warn + + # timeoutsocket allows feedparser to time out rather than hang forever on + # ultra-slow servers. Python 2.3 now has this functionality available in + # the standard socket library, so under 2.3 you don't need to install + # anything. But you probably should anyway, because the socket module is + # buggy and timeoutsocket is better. + if feed_timeout: + try: + feed_timeout = float(feed_timeout) + except: + log.warning("Feed timeout set to invalid value '%s', skipping", feed_timeout) + feed_timeout = None + + if feed_timeout and not offline: + try: + from planet import timeoutsocket + timeoutsocket.setDefaultSocketTimeout(feed_timeout) + log.debug("Socket timeout set to %d seconds", feed_timeout) + except ImportError: + import socket + if hasattr(socket, 'setdefaulttimeout'): + log.debug("timeoutsocket not found, using python function") + socket.setdefaulttimeout(feed_timeout) + log.debug("Socket timeout set to %d seconds", feed_timeout) + else: + log.error("Unable to set timeout to %d seconds", feed_timeout) + + # run the planet + my_planet = planet.Planet(config) + my_planet.run(planet_name, planet_link, template_files, offline) + + + ## This is where archiving is done! 
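+    # The loop below mirrors the planet cache into the Django models:
+    # each channel is matched to an Authors row by its id (created when
+    # missing), every feed item is matched to an Entries row by its
+    # id_hash, and a RunTime row is saved to mark the run.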
## + #add the current channels to the db + channels = my_planet.channels() + for channel in channels: + + author_name = channel.name + + try: + author_face = channel.face + except: + author_face = None + try: + channel_subtitle = channel.subtitle + except: + channel_subtitle = None + try: + channel_title = channel.title + except: + channel_title = None + + channel_url = channel.url + + try: + channel_link = channel.link + except: + channel_link = None + + try: + channel_urlstatus = channel.url_status + except: + channel_urlstatus = None + + label = channel.label + + label_personal = 0 + label_lkd = 0 + label_community = 0 + label_eng = 0 + if label == "Personal": + label_personal = 1 + if label == "LKD": + label_lkd = 1 + if label == "Community": + label_community = 1 + if label == "Eng": + label_eng = 1 + + id = channel.id + + try: + author = Authors.objects.get(author_id=id) + + #update the values with the ones at the config file + author.author_name = author_name + #print author_name + author.author_face = author_face + author.channel_subtitle = channel_subtitle + author.channel_title = channel_title + author.channel_url = channel_url + author.channel_link = channel_link + author.channel_url_status = channel_urlstatus + author.label_personal = label_personal + author.label_lkd = label_lkd + author.label_community = label_community + author.label_eng = label_eng + + except Exception, ex: + #print ex + author = Authors(author_id=id, author_name=author_name, author_face=author_face, channel_subtitle=channel_subtitle, channel_title=channel_title, channel_url=channel_url, channel_link=channel_link, channel_urlstatus=channel_urlstatus, label_personal=label_personal, label_lkd=label_lkd, label_community=label_community, label_eng=label_eng) + + + author.save() + + #entry issues + items = channel.items() + for item in items: + id_hash = item.id_hash + + try: + entry = author.entries_set.get(id_hash = id_hash) + entry.title = item.title + entry.content_html = item.content + entry.content_text = entry.sanitize(item.content) + entry.summary = item.summary + entry.link = item.link + d = item.date + entry.date = datetime.datetime(d[0], d[1], d[2], d[3], d[4], d[5]) + except: + content_html = item.content + #content_text = entry.sanitize(content_html) + d = item.date + if not item.has_key('summary'): summary = None + else: summary = item.summary + entry = author.entries_set.create(id_hash=id_hash, title=item.title, content_html=item.content, summary=summary, link=item.link, date=datetime.datetime(d[0], d[1], d[2], d[3], d[4], d[5])) + entry.content_text = entry.sanitize(content_html) + + entry.save() + + #datetime issue + r = RunTime() + r.save() + + my_planet.generate_all_files(template_files, planet_name, + planet_link, planet_feed, owner_name, owner_email) + + +if __name__ == "__main__": + main() + diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/planet/__init__.py b/DJAGEN/tags/djagen_old/djagen/gezegen/planet/__init__.py new file mode 100755 index 0000000..7829731 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/gezegen/planet/__init__.py @@ -0,0 +1,969 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 -*- +"""Planet aggregator library. + +This package is a library for developing web sites or software that +aggregate RSS, CDF and Atom feeds taken from elsewhere into a single, +combined feed. 
+""" + +__version__ = "2.0" +__authors__ = [ "Scott James Remnant ", + "Jeff Waugh " ] +__license__ = "Python" + +import locale + +# Modules available without separate import +import cache +import feedparser +import sanitize +import htmltmpl +import sgmllib +try: + import logging +except: + import compat_logging as logging + +# Limit the effect of "from planet import *" +__all__ = ("cache", "feedparser", "htmltmpl", "logging", + "Planet", "Channel", "NewsItem") + + +import os +import md5 +import time +import dbhash +import re + +try: + from xml.sax.saxutils import escape +except: + def escape(data): + return data.replace("&","&").replace(">",">").replace("<","<") + +# Version information (for generator headers) +VERSION = ("Planet/%s +http://www.planetplanet.org" % __version__) + +# Default User-Agent header to send when retreiving feeds +USER_AGENT = VERSION + " " + feedparser.USER_AGENT + +# Default cache directory +CACHE_DIRECTORY = "cache" + +# Default number of items to display from a new feed +NEW_FEED_ITEMS = 10 + +# Useful common date/time formats +TIMEFMT_ISO = "%Y-%m-%dT%H:%M:%S+00:00" +TIMEFMT_822 = "%a, %d %b %Y %H:%M:%S +0000" + + +# Log instance to use here +log = logging.getLogger("planet") +try: + log.warning +except: + log.warning = log.warn + +# Defaults for the template file config sections +ENCODING = "utf-8" +ITEMS_PER_PAGE = 60 +DAYS_PER_PAGE = 0 +OUTPUT_DIR = "output" +DATE_FORMAT = "%B %d, %Y %I:%M %p" +NEW_DATE_FORMAT = "%B %d, %Y" +ACTIVITY_THRESHOLD = 0 + +class stripHtml(sgmllib.SGMLParser): + "remove all tags from the data" + def __init__(self, data): + sgmllib.SGMLParser.__init__(self) + self.result='' + self.feed(data) + self.close() + def handle_data(self, data): + if data: self.result+=data + +def template_info(item, date_format): + """Produce a dictionary of template information.""" + info = {} + + #set the locale so that the dates at the feeds will be in english + lc=locale.getlocale() + if lc[0] == None: + try: + locale.setlocale(locale.LC_ALL, '') + except: + pass + elif lc[0].find("tr") != -1: + try: + locale.setlocale(locale.LC_ALL, '') + except: + pass + + for key in item.keys(): + if item.key_type(key) == item.DATE: + date = item.get_as_date(key) + info[key] = time.strftime(date_format, date) + info[key + "_iso"] = time.strftime(TIMEFMT_ISO, date) + info[key + "_822"] = time.strftime(TIMEFMT_822, date) + else: + info[key] = item[key] + if 'title' in item.keys(): + info['title_plain'] = stripHtml(info['title']).result + + return info + + +class Planet: + """A set of channels. + + This class represents a set of channels for which the items will + be aggregated together into one combined feed. + + Properties: + user_agent User-Agent header to fetch feeds with. + cache_directory Directory to store cached channels in. + new_feed_items Number of items to display from a new feed. + filter A regular expression that articles must match. + exclude A regular expression that articles must not match. 
+ """ + def __init__(self, config): + self.config = config + + self._channels = [] + + self.user_agent = USER_AGENT + self.cache_directory = CACHE_DIRECTORY + self.new_feed_items = NEW_FEED_ITEMS + self.filter = None + self.exclude = None + + def tmpl_config_get(self, template, option, default=None, raw=0, vars=None): + """Get a template value from the configuration, with a default.""" + if self.config.has_option(template, option): + return self.config.get(template, option, raw=raw, vars=None) + elif self.config.has_option("Planet", option): + return self.config.get("Planet", option, raw=raw, vars=None) + else: + return default + + def gather_channel_info(self, template_file="Planet"): + date_format = self.tmpl_config_get(template_file, + "date_format", DATE_FORMAT, raw=1) + + activity_threshold = int(self.tmpl_config_get(template_file, + "activity_threshold", + ACTIVITY_THRESHOLD)) + + if activity_threshold: + activity_horizon = \ + time.gmtime(time.time()-86400*activity_threshold) + else: + activity_horizon = 0 + + channels = {} + channels_list = [] + for channel in self.channels(hidden=1): + channels[channel] = template_info(channel, date_format) + channels_list.append(channels[channel]) + + # identify inactive feeds + if activity_horizon: + latest = channel.items(sorted=1) + if len(latest)==0 or latest[0].date < activity_horizon: + channels[channel]["message"] = \ + "no activity in %d days" % activity_threshold + + # report channel level errors + if not channel.url_status: continue + status = int(channel.url_status) + if status == 403: + channels[channel]["message"] = "403: forbidden" + elif status == 404: + channels[channel]["message"] = "404: not found" + elif status == 408: + channels[channel]["message"] = "408: request timeout" + elif status == 410: + channels[channel]["message"] = "410: gone" + elif status == 500: + channels[channel]["message"] = "internal server error" + elif status >= 400: + channels[channel]["message"] = "http status %s" % status + + return channels, channels_list + + def gather_items_info(self, channels, template_file="Planet", channel_list=None): + items_list = [] + prev_date = [] + prev_channel = None + + date_format = self.tmpl_config_get(template_file, + "date_format", DATE_FORMAT, raw=1) + items_per_page = int(self.tmpl_config_get(template_file, + "items_per_page", ITEMS_PER_PAGE)) + days_per_page = int(self.tmpl_config_get(template_file, + "days_per_page", DAYS_PER_PAGE)) + new_date_format = self.tmpl_config_get(template_file, + "new_date_format", NEW_DATE_FORMAT, raw=1) + + for newsitem in self.items(max_items=items_per_page, + max_days=days_per_page, + channels=channel_list): + item_info = template_info(newsitem, date_format) + chan_info = channels[newsitem._channel] + for k, v in chan_info.items(): + item_info["channel_" + k] = v + + # Check for the start of a new day + if prev_date[:3] != newsitem.date[:3]: + prev_date = newsitem.date + item_info["new_date"] = time.strftime(new_date_format, + newsitem.date) + + # Check for the start of a new channel + if item_info.has_key("new_date") \ + or prev_channel != newsitem._channel: + prev_channel = newsitem._channel + item_info["new_channel"] = newsitem._channel.url + + items_list.append(item_info) + + return items_list + + def run(self, planet_name, planet_link, template_files, offline = False): + log = logging.getLogger("planet.runner") + + # Create a planet + log.info("Loading cached data") + if self.config.has_option("Planet", "cache_directory"): + self.cache_directory = self.config.get("Planet", 
"cache_directory") + if self.config.has_option("Planet", "new_feed_items"): + self.new_feed_items = int(self.config.get("Planet", "new_feed_items")) + self.user_agent = "%s +%s %s" % (planet_name, planet_link, + self.user_agent) + if self.config.has_option("Planet", "filter"): + self.filter = self.config.get("Planet", "filter") + + # The other configuration blocks are channels to subscribe to + for feed_url in self.config.sections(): + if feed_url == "Planet" or feed_url in template_files: + continue + log.info(feed_url) + # Create a channel, configure it and subscribe it + channel = Channel(self, feed_url) + self.subscribe(channel) + + # Update it + try: + if not offline and not channel.url_status == '410': + channel.update() + except KeyboardInterrupt: + raise + except: + log.exception("Update of <%s> failed", feed_url) + + def generate_all_files(self, template_files, planet_name, + planet_link, planet_feed, owner_name, owner_email): + + log = logging.getLogger("planet.runner") + # Go-go-gadget-template + for template_file in template_files: + manager = htmltmpl.TemplateManager() + log.info("Processing template %s", template_file) + try: + template = manager.prepare(template_file) + except htmltmpl.TemplateError: + template = manager.prepare(os.path.basename(template_file)) + # Read the configuration + output_dir = self.tmpl_config_get(template_file, + "output_dir", OUTPUT_DIR) + date_format = self.tmpl_config_get(template_file, + "date_format", DATE_FORMAT, raw=1) + encoding = self.tmpl_config_get(template_file, "encoding", ENCODING) + + # We treat each template individually + base = os.path.splitext(os.path.basename(template_file))[0] + url = os.path.join(planet_link, base) + output_file = os.path.join(output_dir, base) + + # Gather information + channels, channels_list = self.gather_channel_info(template_file) + items_list = self.gather_items_info(channels, template_file) + + # Gather item information + + # Process the template + tp = htmltmpl.TemplateProcessor(html_escape=0) + tp.set("Items", items_list) + tp.set("Channels", channels_list) + + # Generic information + tp.set("generator", VERSION) + tp.set("name", planet_name) + tp.set("link", planet_link) + tp.set("owner_name", owner_name) + tp.set("owner_email", owner_email) + tp.set("url", url) + + if planet_feed: + tp.set("feed", planet_feed) + tp.set("feedtype", planet_feed.find('rss')>=0 and 'rss' or 'atom') + + # Update time + date = time.localtime() + tp.set("date", time.strftime(date_format, date)) + tp.set("date_iso", time.strftime(TIMEFMT_ISO, date)) + tp.set("date_822", time.strftime(TIMEFMT_822, date)) + + try: + log.info("Writing %s", output_file) + output_fd = open(output_file, "w") + if encoding.lower() in ("utf-8", "utf8"): + # UTF-8 output is the default because we use that internally + output_fd.write(tp.process(template)) + elif encoding.lower() in ("xml", "html", "sgml"): + # Magic for Python 2.3 users + output = tp.process(template).decode("utf-8") + output_fd.write(output.encode("ascii", "xmlcharrefreplace")) + else: + # Must be a "known" encoding + output = tp.process(template).decode("utf-8") + output_fd.write(output.encode(encoding, "replace")) + output_fd.close() + except KeyboardInterrupt: + raise + except: + log.exception("Write of %s failed", output_file) + + def channels(self, hidden=0, sorted=1): + """Return the list of channels.""" + channels = [] + for channel in self._channels: + if hidden or not channel.has_key("hidden"): + channels.append((channel.name, channel)) + + if sorted: + channels.sort() + + 
return [ c[-1] for c in channels ] + + def find_by_basename(self, basename): + for channel in self._channels: + if basename == channel.cache_basename(): return channel + + def subscribe(self, channel): + """Subscribe the planet to the channel.""" + self._channels.append(channel) + + def unsubscribe(self, channel): + """Unsubscribe the planet from the channel.""" + self._channels.remove(channel) + + def items(self, hidden=0, sorted=1, max_items=0, max_days=0, channels=None): + """Return an optionally filtered list of items in the channel. + + The filters are applied in the following order: + + If hidden is true then items in hidden channels and hidden items + will be returned. + + If sorted is true then the item list will be sorted with the newest + first. + + If max_items is non-zero then this number of items, at most, will + be returned. + + If max_days is non-zero then any items older than the newest by + this number of days won't be returned. Requires sorted=1 to work. + + + The sharp-eyed will note that this looks a little strange code-wise, + it turns out that Python gets *really* slow if we try to sort the + actual items themselves. Also we use mktime here, but it's ok + because we discard the numbers and just need them to be relatively + consistent between each other. + """ + planet_filter_re = None + if self.filter: + planet_filter_re = re.compile(self.filter, re.I) + planet_exclude_re = None + if self.exclude: + planet_exclude_re = re.compile(self.exclude, re.I) + + items = [] + seen_guids = {} + if not channels: channels=self.channels(hidden=hidden, sorted=0) + for channel in channels: + for item in channel._items.values(): + if hidden or not item.has_key("hidden"): + + channel_filter_re = None + if channel.filter: + channel_filter_re = re.compile(channel.filter, + re.I) + channel_exclude_re = None + if channel.exclude: + channel_exclude_re = re.compile(channel.exclude, + re.I) + if (planet_filter_re or planet_exclude_re \ + or channel_filter_re or channel_exclude_re): + title = "" + if item.has_key("title"): + title = item.title + content = item.get_content("content") + + if planet_filter_re: + if not (planet_filter_re.search(title) \ + or planet_filter_re.search(content)): + continue + + if planet_exclude_re: + if (planet_exclude_re.search(title) \ + or planet_exclude_re.search(content)): + continue + + if channel_filter_re: + if not (channel_filter_re.search(title) \ + or channel_filter_re.search(content)): + continue + + if channel_exclude_re: + if (channel_exclude_re.search(title) \ + or channel_exclude_re.search(content)): + continue + + if not seen_guids.has_key(item.id): + seen_guids[item.id] = 1; + items.append((time.mktime(item.date), item.order, item)) + + # Sort the list + if sorted: + items.sort() + items.reverse() + + # Apply max_items filter + if len(items) and max_items: + items = items[:max_items] + + # Apply max_days filter + if len(items) and max_days: + max_count = 0 + max_time = items[0][0] - max_days * 84600 + for item in items: + if item[0] > max_time: + max_count += 1 + else: + items = items[:max_count] + break + + return [ i[-1] for i in items ] + +class Channel(cache.CachedInfo): + """A list of news items. + + This class represents a list of news items taken from the feed of + a website or other source. + + Properties: + url URL of the feed. + url_etag E-Tag of the feed URL. + url_modified Last modified time of the feed URL. + url_status Last HTTP status of the feed URL. + hidden Channel should be hidden (True if exists). 
+ name Name of the feed owner, or feed title. + next_order Next order number to be assigned to NewsItem + + updated Correct UTC-Normalised update time of the feed. + last_updated Correct UTC-Normalised time the feed was last updated. + + id An identifier the feed claims is unique (*). + title One-line title (*). + link Link to the original format feed (*). + tagline Short description of the feed (*). + info Longer description of the feed (*). + + modified Date the feed claims to have been modified (*). + + author Name of the author (*). + publisher Name of the publisher (*). + generator Name of the feed generator (*). + category Category name (*). + copyright Copyright information for humans to read (*). + license Link to the licence for the content (*). + docs Link to the specification of the feed format (*). + language Primary language (*). + errorreportsto E-Mail address to send error reports to (*). + + image_url URL of an associated image (*). + image_link Link to go with the associated image (*). + image_title Alternative text of the associated image (*). + image_width Width of the associated image (*). + image_height Height of the associated image (*). + + filter A regular expression that articles must match. + exclude A regular expression that articles must not match. + + Properties marked (*) will only be present if the original feed + contained them. Note that the optional 'modified' date field is simply + a claim made by the item and parsed from the information given, 'updated' + (and 'last_updated') are far more reliable sources of information. + + Some feeds may define additional properties to those above. + """ + IGNORE_KEYS = ("links", "contributors", "textinput", "cloud", "categories", + "url", "href", "url_etag", "url_modified", "tags", "itunes_explicit") + + def __init__(self, planet, url): + if not os.path.isdir(planet.cache_directory): + os.makedirs(planet.cache_directory) + cache_filename = cache.filename(planet.cache_directory, url) + cache_file = dbhash.open(cache_filename, "c", 0666) + + cache.CachedInfo.__init__(self, cache_file, url, root=1) + + self._items = {} + self._planet = planet + self._expired = [] + self.url = url + # retain the original URL for error reporting + self.configured_url = url + self.url_etag = None + self.url_status = None + self.url_modified = None + self.name = None + self.updated = None + self.last_updated = None + self.filter = None + self.exclude = None + self.next_order = "0" + self.cache_read() + self.cache_read_entries() + + if planet.config.has_section(url): + for option in planet.config.options(url): + value = planet.config.get(url, option) + self.set_as_string(option, value, cached=0) + + def has_item(self, id_): + """Check whether the item exists in the channel.""" + return self._items.has_key(id_) + + def get_item(self, id_): + """Return the item from the channel.""" + return self._items[id_] + + # Special methods + __contains__ = has_item + + def items(self, hidden=0, sorted=0): + """Return the item list.""" + items = [] + for item in self._items.values(): + if hidden or not item.has_key("hidden"): + items.append((time.mktime(item.date), item.order, item)) + + if sorted: + items.sort() + items.reverse() + + return [ i[-1] for i in items ] + + def __iter__(self): + """Iterate the sorted item list.""" + return iter(self.items(sorted=1)) + + def cache_read_entries(self): + """Read entry information from the cache.""" + keys = self._cache.keys() + for key in keys: + if key.find(" ") != -1: continue + if self.has_key(key): continue + 
+ item = NewsItem(self, key) + self._items[key] = item + + def cache_basename(self): + return cache.filename('',self._id) + + def cache_write(self, sync=1): + + """Write channel and item information to the cache.""" + for item in self._items.values(): + item.cache_write(sync=0) + for item in self._expired: + item.cache_clear(sync=0) + cache.CachedInfo.cache_write(self, sync) + + self._expired = [] + + def feed_information(self): + """ + Returns a description string for the feed embedded in this channel. + + This will usually simply be the feed url embedded in <>, but in the + case where the current self.url has changed from the original + self.configured_url the string will contain both pieces of information. + This is so that the URL in question is easier to find in logging + output: getting an error about a URL that doesn't appear in your config + file is annoying. + """ + if self.url == self.configured_url: + return "<%s>" % self.url + else: + return "<%s> (formerly <%s>)" % (self.url, self.configured_url) + + def update(self): + """Download the feed to refresh the information. + + This does the actual work of pulling down the feed and if it changes + updates the cached information about the feed and entries within it. + """ + info = feedparser.parse(self.url, + etag=self.url_etag, modified=self.url_modified, + agent=self._planet.user_agent) + if info.has_key("status"): + self.url_status = str(info.status) + elif info.has_key("entries") and len(info.entries)>0: + self.url_status = str(200) + elif info.bozo and info.bozo_exception.__class__.__name__=='Timeout': + self.url_status = str(408) + else: + self.url_status = str(500) + + if self.url_status == '301' and \ + (info.has_key("entries") and len(info.entries)>0): + log.warning("Feed has moved from <%s> to <%s>", self.url, info.url) + try: + os.link(cache.filename(self._planet.cache_directory, self.url), + cache.filename(self._planet.cache_directory, info.url)) + except: + pass + self.url = info.url + elif self.url_status == '304': + log.info("Feed %s unchanged", self.feed_information()) + return + elif self.url_status == '410': + log.info("Feed %s gone", self.feed_information()) + self.cache_write() + return + elif self.url_status == '408': + log.warning("Feed %s timed out", self.feed_information()) + return + elif int(self.url_status) >= 400: + log.error("Error %s while updating feed %s", + self.url_status, self.feed_information()) + return + else: + log.info("Updating feed %s", self.feed_information()) + + self.url_etag = info.has_key("etag") and info.etag or None + self.url_modified = info.has_key("modified") and info.modified or None + if self.url_etag is not None: + log.debug("E-Tag: %s", self.url_etag) + if self.url_modified is not None: + log.debug("Last Modified: %s", + time.strftime(TIMEFMT_ISO, self.url_modified)) + + self.update_info(info.feed) + self.update_entries(info.entries) + self.cache_write() + + def update_info(self, feed): + """Update information from the feed. + + This reads the feed information supplied by feedparser and updates + the cached information about the feed. These are the various + potentially interesting properties that you might care about. 
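+
+        For example, a feedparser "author_detail" dict is flattened into
+        the cached keys "author_name" and "author_email", and a date
+        field such as "updated_parsed" is stored as the date key
+        "updated".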
+ """ + for key in feed.keys(): + if key in self.IGNORE_KEYS or key + "_parsed" in self.IGNORE_KEYS: + # Ignored fields + pass + elif feed.has_key(key + "_parsed"): + # Ignore unparsed date fields + pass + elif key.endswith("_detail"): + # retain name and email sub-fields + if feed[key].has_key('name') and feed[key].name: + self.set_as_string(key.replace("_detail","_name"), \ + feed[key].name) + if feed[key].has_key('email') and feed[key].email: + self.set_as_string(key.replace("_detail","_email"), \ + feed[key].email) + elif key == "items": + # Ignore items field + pass + elif key.endswith("_parsed"): + # Date fields + if feed[key] is not None: + self.set_as_date(key[:-len("_parsed")], feed[key]) + elif key == "image": + # Image field: save all the information + if feed[key].has_key("url"): + self.set_as_string(key + "_url", feed[key].url) + if feed[key].has_key("link"): + self.set_as_string(key + "_link", feed[key].link) + if feed[key].has_key("title"): + self.set_as_string(key + "_title", feed[key].title) + if feed[key].has_key("width"): + self.set_as_string(key + "_width", str(feed[key].width)) + if feed[key].has_key("height"): + self.set_as_string(key + "_height", str(feed[key].height)) + elif isinstance(feed[key], (str, unicode)): + # String fields + try: + detail = key + '_detail' + if feed.has_key(detail) and feed[detail].has_key('type'): + if feed[detail].type == 'text/html': + feed[key] = sanitize.HTML(feed[key]) + elif feed[detail].type == 'text/plain': + feed[key] = escape(feed[key]) + self.set_as_string(key, feed[key]) + except KeyboardInterrupt: + raise + except: + log.exception("Ignored '%s' of <%s>, unknown format", + key, self.url) + + def update_entries(self, entries): + """Update entries from the feed. + + This reads the entries supplied by feedparser and updates the + cached information about them. It's at this point we update + the 'updated' timestamp and keep the old one in 'last_updated', + these provide boundaries for acceptable entry times. + + If this is the first time a feed has been updated then most of the + items will be marked as hidden, according to Planet.new_feed_items. + + If the feed does not contain items which, according to the sort order, + should be there; those items are assumed to have been expired from + the feed or replaced and are removed from the cache. 
+ """ + if not len(entries): + return + + self.last_updated = self.updated + self.updated = time.gmtime() + + new_items = [] + feed_items = [] + for entry in entries: + # Try really hard to find some kind of unique identifier + if entry.has_key("id"): + entry_id = cache.utf8(entry.id) + elif entry.has_key("link"): + entry_id = cache.utf8(entry.link) + elif entry.has_key("title"): + entry_id = (self.url + "/" + + md5.new(cache.utf8(entry.title)).hexdigest()) + elif entry.has_key("summary"): + entry_id = (self.url + "/" + + md5.new(cache.utf8(entry.summary)).hexdigest()) + else: + log.error("Unable to find or generate id, entry ignored") + continue + + # Create the item if necessary and update + if self.has_item(entry_id): + item = self._items[entry_id] + else: + item = NewsItem(self, entry_id) + self._items[entry_id] = item + new_items.append(item) + item.update(entry) + feed_items.append(entry_id) + + # Hide excess items the first time through + if self.last_updated is None and self._planet.new_feed_items \ + and len(feed_items) > self._planet.new_feed_items: + item.hidden = "yes" + log.debug("Marked <%s> as hidden (new feed)", entry_id) + + # Assign order numbers in reverse + new_items.reverse() + for item in new_items: + item.order = self.next_order = str(int(self.next_order) + 1) + + # Check for expired or replaced items + feed_count = len(feed_items) + log.debug("Items in Feed: %d", feed_count) + for item in self.items(sorted=1): + if feed_count < 1: + break + elif item.id in feed_items: + feed_count -= 1 + elif item._channel.url_status != '226': + del(self._items[item.id]) + self._expired.append(item) + log.debug("Removed expired or replaced item <%s>", item.id) + + def get_name(self, key): + """Return the key containing the name.""" + for key in ("name", "title"): + if self.has_key(key) and self.key_type(key) != self.NULL: + return self.get_as_string(key) + + return "" + +class NewsItem(cache.CachedInfo): + """An item of news. + + This class represents a single item of news on a channel. They're + created by members of the Channel class and accessible through it. + + Properties: + id Channel-unique identifier for this item. + id_hash Relatively short, printable cryptographic hash of id + date Corrected UTC-Normalised update time, for sorting. + order Order in which items on the same date can be sorted. + hidden Item should be hidden (True if exists). + + title One-line title (*). + link Link to the original format text (*). + summary Short first-page summary (*). + content Full HTML content. + + modified Date the item claims to have been modified (*). + issued Date the item claims to have been issued (*). + created Date the item claims to have been created (*). + expired Date the item claims to expire (*). + + author Name of the author (*). + publisher Name of the publisher (*). + category Category name (*). + comments Link to a page to enter comments (*). + license Link to the licence for the content (*). + source_name Name of the original source of this item (*). + source_link Link to the original source of this item (*). + + Properties marked (*) will only be present if the original feed + contained them. Note that the various optional date fields are + simply claims made by the item and parsed from the information + given, 'date' is a far more reliable source of information. + + Some feeds may define additional properties to those above. 
+ """ + IGNORE_KEYS = ("categories", "contributors", "enclosures", "links", + "guidislink", "date", "tags") + + def __init__(self, channel, id_): + cache.CachedInfo.__init__(self, channel._cache, id_) + + self._channel = channel + self.id = id_ + self.id_hash = md5.new(id_).hexdigest() + self.date = None + self.order = None + self.content = None + self.cache_read() + + def update(self, entry): + """Update the item from the feedparser entry given.""" + for key in entry.keys(): + if key in self.IGNORE_KEYS or key + "_parsed" in self.IGNORE_KEYS: + # Ignored fields + pass + elif entry.has_key(key + "_parsed"): + # Ignore unparsed date fields + pass + elif key.endswith("_detail"): + # retain name, email, and language sub-fields + if entry[key].has_key('name') and entry[key].name: + self.set_as_string(key.replace("_detail","_name"), \ + entry[key].name) + if entry[key].has_key('email') and entry[key].email: + self.set_as_string(key.replace("_detail","_email"), \ + entry[key].email) + if entry[key].has_key('language') and entry[key].language and \ + (not self._channel.has_key('language') or \ + entry[key].language != self._channel.language): + self.set_as_string(key.replace("_detail","_language"), \ + entry[key].language) + elif key.endswith("_parsed"): + # Date fields + if entry[key] is not None: + self.set_as_date(key[:-len("_parsed")], entry[key]) + elif key == "source": + # Source field: save both url and value + if entry[key].has_key("value"): + self.set_as_string(key + "_name", entry[key].value) + if entry[key].has_key("url"): + self.set_as_string(key + "_link", entry[key].url) + elif key == "content": + # Content field: concatenate the values + value = "" + for item in entry[key]: + if item.type == 'text/html': + item.value = sanitize.HTML(item.value) + elif item.type == 'text/plain': + item.value = escape(item.value) + if item.has_key('language') and item.language and \ + (not self._channel.has_key('language') or + item.language != self._channel.language) : + self.set_as_string(key + "_language", item.language) + value += cache.utf8(item.value) + self.set_as_string(key, value) + elif isinstance(entry[key], (str, unicode)): + # String fields + try: + detail = key + '_detail' + if entry.has_key(detail): + if entry[detail].has_key('type'): + if entry[detail].type == 'text/html': + entry[key] = sanitize.HTML(entry[key]) + elif entry[detail].type == 'text/plain': + entry[key] = escape(entry[key]) + self.set_as_string(key, entry[key]) + except KeyboardInterrupt: + raise + except: + log.exception("Ignored '%s' of <%s>, unknown format", + key, self.id) + + # Generate the date field if we need to + self.get_date("date") + + def get_date(self, key): + """Get (or update) the date key. + + We check whether the date the entry claims to have been changed is + since we last updated this feed and when we pulled the feed off the + site. + + If it is then it's probably not bogus, and we'll sort accordingly. + + If it isn't then we bound it appropriately, this ensures that + entries appear in posting sequence but don't overlap entries + added in previous updates and don't creep into the next one. 
+ """ + + for other_key in ("updated", "modified", "published", "issued", "created"): + if self.has_key(other_key): + date = self.get_as_date(other_key) + break + else: + date = None + + if date is not None: + if date > self._channel.updated: + date = self._channel.updated +# elif date < self._channel.last_updated: +# date = self._channel.updated + elif self.has_key(key) and self.key_type(key) != self.NULL: + return self.get_as_date(key) + else: + date = self._channel.updated + + self.set_as_date(key, date) + return date + + def get_content(self, key): + """Return the key containing the content.""" + for key in ("content", "tagline", "summary"): + if self.has_key(key) and self.key_type(key) != self.NULL: + return self.get_as_string(key) + + return "" diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/planet/atomstyler.py b/DJAGEN/tags/djagen_old/djagen/gezegen/planet/atomstyler.py new file mode 100755 index 0000000..9220702 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/gezegen/planet/atomstyler.py @@ -0,0 +1,124 @@ +from xml.dom import minidom, Node +from urlparse import urlparse, urlunparse +from xml.parsers.expat import ExpatError +from htmlentitydefs import name2codepoint +import re + +# select and apply an xml:base for this entry +class relativize: + def __init__(self, parent): + self.score = {} + self.links = [] + self.collect_and_tally(parent) + self.base = self.select_optimal_base() + if self.base: + if not parent.hasAttribute('xml:base'): + self.rebase(parent) + parent.setAttribute('xml:base', self.base) + + # collect and tally cite, href and src attributes + def collect_and_tally(self,parent): + uri = None + if parent.hasAttribute('cite'): uri=parent.getAttribute('cite') + if parent.hasAttribute('href'): uri=parent.getAttribute('href') + if parent.hasAttribute('src'): uri=parent.getAttribute('src') + + if uri: + parts=urlparse(uri) + if parts[0].lower() == 'http': + parts = (parts[1]+parts[2]).split('/') + base = None + for i in range(1,len(parts)): + base = tuple(parts[0:i]) + self.score[base] = self.score.get(base,0) + len(base) + if base and base not in self.links: self.links.append(base) + + for node in parent.childNodes: + if node.nodeType == Node.ELEMENT_NODE: + self.collect_and_tally(node) + + # select the xml:base with the highest score + def select_optimal_base(self): + if not self.score: return None + for link in self.links: + self.score[link] = 0 + winner = max(self.score.values()) + if not winner: return None + for key in self.score.keys(): + if self.score[key] == winner: + if winner == len(key): return None + return urlunparse(('http', key[0], '/'.join(key[1:]), '', '', '')) + '/' + + # rewrite cite, href and src attributes using this base + def rebase(self,parent): + uri = None + if parent.hasAttribute('cite'): uri=parent.getAttribute('cite') + if parent.hasAttribute('href'): uri=parent.getAttribute('href') + if parent.hasAttribute('src'): uri=parent.getAttribute('src') + if uri and uri.startswith(self.base): + uri = uri[len(self.base):] or '.' 
+ if parent.hasAttribute('href'): uri=parent.setAttribute('href', uri) + if parent.hasAttribute('src'): uri=parent.setAttribute('src', uri) + + for node in parent.childNodes: + if node.nodeType == Node.ELEMENT_NODE: + self.rebase(node) + +# convert type="html" to type="plain" or type="xhtml" as appropriate +def retype(parent): + for node in parent.childNodes: + if node.nodeType == Node.ELEMENT_NODE: + + if node.hasAttribute('type') and node.getAttribute('type') == 'html': + if len(node.childNodes)==0: + node.removeAttribute('type') + elif len(node.childNodes)==1: + + # replace html entity defs with utf-8 + chunks=re.split('&(\w+);', node.childNodes[0].nodeValue) + for i in range(1,len(chunks),2): + if chunks[i] in ['amp', 'lt', 'gt', 'apos', 'quot']: + chunks[i] ='&' + chunks[i] +';' + elif chunks[i] in name2codepoint: + chunks[i]=unichr(name2codepoint[chunks[i]]) + else: + chunks[i]='&' + chunks[i] + ';' + text = u"".join(chunks) + + try: + # see if the resulting text is a well-formed XML fragment + div = '
<div xmlns="http://www.w3.org/1999/xhtml">%s</div>
' + data = minidom.parseString((div % text.encode('utf-8'))) + + if text.find('<') < 0: + # plain text + node.removeAttribute('type') + text = data.documentElement.childNodes[0].nodeValue + node.childNodes[0].replaceWholeText(text) + + elif len(text) > 80: + # xhtml + node.setAttribute('type', 'xhtml') + node.removeChild(node.childNodes[0]) + node.appendChild(data.documentElement) + + except ExpatError: + # leave as html + pass + + else: + # recurse + retype(node) + + if parent.nodeName == 'entry': + relativize(parent) + +if __name__ == '__main__': + + # run styler on each file mention on the command line + import sys + for feed in sys.argv[1:]: + doc = minidom.parse(feed) + doc.normalize() + retype(doc.documentElement) + open(feed,'w').write(doc.toxml('utf-8')) diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/planet/cache.py b/DJAGEN/tags/djagen_old/djagen/gezegen/planet/cache.py new file mode 100755 index 0000000..dfc529b --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/gezegen/planet/cache.py @@ -0,0 +1,306 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 -*- +"""Item cache. + +Between runs of Planet we need somewhere to store the feed information +we parsed, this is so we don't lose information when a particular feed +goes away or is too short to hold enough items. + +This module provides the code to handle this cache transparently enough +that the rest of the code can take the persistance for granted. +""" + +import os +import re + + +# Regular expressions to sanitise cache filenames +re_url_scheme = re.compile(r'^[^:]*://') +re_slash = re.compile(r'[?/]+') +re_initial_cruft = re.compile(r'^[,.]*') +re_final_cruft = re.compile(r'[,.]*$') + + +class CachedInfo: + """Cached information. + + This class is designed to hold information that is stored in a cache + between instances. It can act both as a dictionary (c['foo']) and + as an object (c.foo) to get and set values and supports both string + and date values. + + If you wish to support special fields you can derive a class off this + and implement get_FIELD and set_FIELD functions which will be + automatically called. 
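+
+    For example, the Channel class in planet/__init__.py above derives
+    from this class and implements get_name(), so reading channel.name
+    transparently falls back to the cached "title" when no explicit
+    name is set.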
+ """ + STRING = "string" + DATE = "date" + NULL = "null" + + def __init__(self, cache, id_, root=0): + self._type = {} + self._value = {} + self._cached = {} + + self._cache = cache + self._id = id_.replace(" ", "%20") + self._root = root + + def cache_key(self, key): + """Return the cache key name for the given key.""" + key = key.replace(" ", "_") + if self._root: + return key + else: + return self._id + " " + key + + def cache_read(self): + """Read information from the cache.""" + if self._root: + keys_key = " keys" + else: + keys_key = self._id + + if self._cache.has_key(keys_key): + keys = self._cache[keys_key].split(" ") + else: + return + + for key in keys: + cache_key = self.cache_key(key) + if not self._cached.has_key(key) or self._cached[key]: + # Key either hasn't been loaded, or is one for the cache + self._value[key] = self._cache[cache_key] + self._type[key] = self._cache[cache_key + " type"] + self._cached[key] = 1 + + def cache_write(self, sync=1): + """Write information to the cache.""" + self.cache_clear(sync=0) + + keys = [] + for key in self.keys(): + cache_key = self.cache_key(key) + if not self._cached[key]: + if self._cache.has_key(cache_key): + # Non-cached keys need to be cleared + del(self._cache[cache_key]) + del(self._cache[cache_key + " type"]) + continue + + keys.append(key) + self._cache[cache_key] = self._value[key] + self._cache[cache_key + " type"] = self._type[key] + + if self._root: + keys_key = " keys" + else: + keys_key = self._id + + self._cache[keys_key] = " ".join(keys) + if sync: + self._cache.sync() + + def cache_clear(self, sync=1): + """Remove information from the cache.""" + if self._root: + keys_key = " keys" + else: + keys_key = self._id + + if self._cache.has_key(keys_key): + keys = self._cache[keys_key].split(" ") + del(self._cache[keys_key]) + else: + return + + for key in keys: + cache_key = self.cache_key(key) + del(self._cache[cache_key]) + del(self._cache[cache_key + " type"]) + + if sync: + self._cache.sync() + + def has_key(self, key): + """Check whether the key exists.""" + key = key.replace(" ", "_") + return self._value.has_key(key) + + def key_type(self, key): + """Return the key type.""" + key = key.replace(" ", "_") + return self._type[key] + + def set(self, key, value, cached=1): + """Set the value of the given key. + + If a set_KEY function exists that is called otherwise the + string function is called and the date function if that fails + (it nearly always will). + """ + key = key.replace(" ", "_") + + try: + func = getattr(self, "set_" + key) + except AttributeError: + pass + else: + return func(key, value) + + if value == None: + return self.set_as_null(key, value) + else: + try: + return self.set_as_string(key, value) + except TypeError: + return self.set_as_date(key, value) + + def get(self, key): + """Return the value of the given key. + + If a get_KEY function exists that is called otherwise the + correctly typed function is called if that exists. + """ + key = key.replace(" ", "_") + + try: + func = getattr(self, "get_" + key) + except AttributeError: + pass + else: + return func(key) + + try: + func = getattr(self, "get_as_" + self._type[key]) + except AttributeError: + pass + else: + return func(key) + + return self._value[key] + + def set_as_string(self, key, value, cached=1): + """Set the key to the string value. + + The value is converted to UTF-8 if it is a Unicode string, otherwise + it's assumed to have failed decoding (feedparser tries pretty hard) + so has all non-ASCII characters stripped. 
+ """ + value = utf8(value) + + key = key.replace(" ", "_") + self._value[key] = value + self._type[key] = self.STRING + self._cached[key] = cached + + def get_as_string(self, key): + """Return the key as a string value.""" + key = key.replace(" ", "_") + if not self.has_key(key): + raise KeyError, key + + return self._value[key] + + def set_as_date(self, key, value, cached=1): + """Set the key to the date value. + + The date should be a 9-item tuple as returned by time.gmtime(). + """ + value = " ".join([ str(s) for s in value ]) + + key = key.replace(" ", "_") + self._value[key] = value + self._type[key] = self.DATE + self._cached[key] = cached + + def get_as_date(self, key): + """Return the key as a date value.""" + key = key.replace(" ", "_") + if not self.has_key(key): + raise KeyError, key + + value = self._value[key] + return tuple([ int(i) for i in value.split(" ") ]) + + def set_as_null(self, key, value, cached=1): + """Set the key to the null value. + + This only exists to make things less magic. + """ + key = key.replace(" ", "_") + self._value[key] = "" + self._type[key] = self.NULL + self._cached[key] = cached + + def get_as_null(self, key): + """Return the key as the null value.""" + key = key.replace(" ", "_") + if not self.has_key(key): + raise KeyError, key + + return None + + def del_key(self, key): + """Delete the given key.""" + key = key.replace(" ", "_") + if not self.has_key(key): + raise KeyError, key + + del(self._value[key]) + del(self._type[key]) + del(self._cached[key]) + + def keys(self): + """Return the list of cached keys.""" + return self._value.keys() + + def __iter__(self): + """Iterate the cached keys.""" + return iter(self._value.keys()) + + # Special methods + __contains__ = has_key + __setitem__ = set_as_string + __getitem__ = get + __delitem__ = del_key + __delattr__ = del_key + + def __setattr__(self, key, value): + if key.startswith("_"): + self.__dict__[key] = value + else: + self.set(key, value) + + def __getattr__(self, key): + if self.has_key(key): + return self.get(key) + else: + raise AttributeError, key + + +def filename(directory, filename): + """Return a filename suitable for the cache. + + Strips dangerous and common characters to create a filename we + can use to store the cache in. + """ + filename = re_url_scheme.sub("", filename) + filename = re_slash.sub(",", filename) + filename = re_initial_cruft.sub("", filename) + filename = re_final_cruft.sub("", filename) + + return os.path.join(directory, filename) + +def utf8(value): + """Return the value as a UTF-8 string.""" + if type(value) == type(u''): + return value.encode("utf-8") + else: + try: + return unicode(value, "utf-8").encode("utf-8") + except UnicodeError: + try: + return unicode(value, "iso-8859-1").encode("utf-8") + except UnicodeError: + return unicode(value, "ascii", "replace").encode("utf-8") diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/planet/compat_logging/__init__.py b/DJAGEN/tags/djagen_old/djagen/gezegen/planet/compat_logging/__init__.py new file mode 100755 index 0000000..3bd0c6d --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/gezegen/planet/compat_logging/__init__.py @@ -0,0 +1,1196 @@ +# Copyright 2001-2002 by Vinay Sajip. All Rights Reserved. 
+# +# Permission to use, copy, modify, and distribute this software and its +# documentation for any purpose and without fee is hereby granted, +# provided that the above copyright notice appear in all copies and that +# both that copyright notice and this permission notice appear in +# supporting documentation, and that the name of Vinay Sajip +# not be used in advertising or publicity pertaining to distribution +# of the software without specific, written prior permission. +# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL +# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER +# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +""" +Logging package for Python. Based on PEP 282 and comments thereto in +comp.lang.python, and influenced by Apache's log4j system. + +Should work under Python versions >= 1.5.2, except that source line +information is not available unless 'sys._getframe()' is. + +Copyright (C) 2001-2002 Vinay Sajip. All Rights Reserved. + +To use, simply 'import logging' and log away! +""" + +import sys, os, types, time, string, cStringIO + +try: + import thread + import threading +except ImportError: + thread = None + +__author__ = "Vinay Sajip " +__status__ = "beta" +__version__ = "0.4.8.1" +__date__ = "26 June 2003" + +#--------------------------------------------------------------------------- +# Miscellaneous module data +#--------------------------------------------------------------------------- + +# +#_srcfile is used when walking the stack to check when we've got the first +# caller stack frame. +# +if string.lower(__file__[-4:]) in ['.pyc', '.pyo']: + _srcfile = __file__[:-4] + '.py' +else: + _srcfile = __file__ +_srcfile = os.path.normcase(_srcfile) + +# _srcfile is only used in conjunction with sys._getframe(). +# To provide compatibility with older versions of Python, set _srcfile +# to None if _getframe() is not available; this value will prevent +# findCaller() from being called. +if not hasattr(sys, "_getframe"): + _srcfile = None + +# +#_startTime is used as the base when calculating the relative time of events +# +_startTime = time.time() + +# +#raiseExceptions is used to see if exceptions during handling should be +#propagated +# +raiseExceptions = 1 + +#--------------------------------------------------------------------------- +# Level related stuff +#--------------------------------------------------------------------------- +# +# Default levels and level names, these can be replaced with any positive set +# of values having corresponding names. There is a pseudo-level, NOTSET, which +# is only really there as a lower limit for user-defined levels. Handlers and +# loggers are initialized with NOTSET so that they will log all messages, even +# at user-defined levels. +# +CRITICAL = 50 +FATAL = CRITICAL +ERROR = 40 +WARNING = 30 +WARN = WARNING +INFO = 20 +DEBUG = 10 +NOTSET = 0 + +_levelNames = { + CRITICAL : 'CRITICAL', + ERROR : 'ERROR', + WARNING : 'WARNING', + INFO : 'INFO', + DEBUG : 'DEBUG', + NOTSET : 'NOTSET', + 'CRITICAL' : CRITICAL, + 'ERROR' : ERROR, + 'WARN' : WARNING, + 'WARNING' : WARNING, + 'INFO' : INFO, + 'DEBUG' : DEBUG, + 'NOTSET' : NOTSET, +} + +def getLevelName(level): + """ + Return the textual representation of logging level 'level'. 
+ + If the level is one of the predefined levels (CRITICAL, ERROR, WARNING, + INFO, DEBUG) then you get the corresponding string. If you have + associated levels with names using addLevelName then the name you have + associated with 'level' is returned. Otherwise, the string + "Level %s" % level is returned. + """ + return _levelNames.get(level, ("Level %s" % level)) + +def addLevelName(level, levelName): + """ + Associate 'levelName' with 'level'. + + This is used when converting levels to text during message formatting. + """ + _acquireLock() + try: #unlikely to cause an exception, but you never know... + _levelNames[level] = levelName + _levelNames[levelName] = level + finally: + _releaseLock() + +#--------------------------------------------------------------------------- +# Thread-related stuff +#--------------------------------------------------------------------------- + +# +#_lock is used to serialize access to shared data structures in this module. +#This needs to be an RLock because fileConfig() creates Handlers and so +#might arbitrary user threads. Since Handler.__init__() updates the shared +#dictionary _handlers, it needs to acquire the lock. But if configuring, +#the lock would already have been acquired - so we need an RLock. +#The same argument applies to Loggers and Manager.loggerDict. +# +_lock = None + +def _acquireLock(): + """ + Acquire the module-level lock for serializing access to shared data. + + This should be released with _releaseLock(). + """ + global _lock + if (not _lock) and thread: + _lock = threading.RLock() + if _lock: + _lock.acquire() + +def _releaseLock(): + """ + Release the module-level lock acquired by calling _acquireLock(). + """ + if _lock: + _lock.release() + +#--------------------------------------------------------------------------- +# The logging record +#--------------------------------------------------------------------------- + +class LogRecord: + """ + A LogRecord instance represents an event being logged. + + LogRecord instances are created every time something is logged. They + contain all the information pertinent to the event being logged. The + main information passed in is in msg and args, which are combined + using str(msg) % args to create the message field of the record. The + record also includes information such as when the record was created, + the source line where the logging call was made, and any exception + information to be logged. + """ + def __init__(self, name, level, pathname, lineno, msg, args, exc_info): + """ + Initialize a logging record with interesting information. + """ + ct = time.time() + self.name = name + self.msg = msg + self.args = args + self.levelname = getLevelName(level) + self.levelno = level + self.pathname = pathname + try: + self.filename = os.path.basename(pathname) + self.module = os.path.splitext(self.filename)[0] + except: + self.filename = pathname + self.module = "Unknown module" + self.exc_info = exc_info + self.lineno = lineno + self.created = ct + self.msecs = (ct - long(ct)) * 1000 + self.relativeCreated = (self.created - _startTime) * 1000 + if thread: + self.thread = thread.get_ident() + else: + self.thread = None + if hasattr(os, 'getpid'): + self.process = os.getpid() + else: + self.process = None + + def __str__(self): + return ''%(self.name, self.levelno, + self.pathname, self.lineno, self.msg) + + def getMessage(self): + """ + Return the message for this LogRecord. + + Return the message for this LogRecord after merging any user-supplied + arguments with the message. 
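+
+        For example, a record created with msg "Updating feed %s" and
+        args ("<http://example.org/feed>",) yields the merged message
+        "Updating feed <http://example.org/feed>".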
+ """ + if not hasattr(types, "UnicodeType"): #if no unicode support... + msg = str(self.msg) + else: + try: + msg = str(self.msg) + except UnicodeError: + msg = self.msg #Defer encoding till later + if self.args: + msg = msg % self.args + return msg + +def makeLogRecord(dict): + """ + Make a LogRecord whose attributes are defined by the specified dictionary, + This function is useful for converting a logging event received over + a socket connection (which is sent as a dictionary) into a LogRecord + instance. + """ + rv = LogRecord(None, None, "", 0, "", (), None) + rv.__dict__.update(dict) + return rv + +#--------------------------------------------------------------------------- +# Formatter classes and functions +#--------------------------------------------------------------------------- + +class Formatter: + """ + Formatter instances are used to convert a LogRecord to text. + + Formatters need to know how a LogRecord is constructed. They are + responsible for converting a LogRecord to (usually) a string which can + be interpreted by either a human or an external system. The base Formatter + allows a formatting string to be specified. If none is supplied, the + default value of "%s(message)\\n" is used. + + The Formatter can be initialized with a format string which makes use of + knowledge of the LogRecord attributes - e.g. the default value mentioned + above makes use of the fact that the user's message and arguments are pre- + formatted into a LogRecord's message attribute. Currently, the useful + attributes in a LogRecord are described by: + + %(name)s Name of the logger (logging channel) + %(levelno)s Numeric logging level for the message (DEBUG, INFO, + WARNING, ERROR, CRITICAL) + %(levelname)s Text logging level for the message ("DEBUG", "INFO", + "WARNING", "ERROR", "CRITICAL") + %(pathname)s Full pathname of the source file where the logging + call was issued (if available) + %(filename)s Filename portion of pathname + %(module)s Module (name portion of filename) + %(lineno)d Source line number where the logging call was issued + (if available) + %(created)f Time when the LogRecord was created (time.time() + return value) + %(asctime)s Textual time when the LogRecord was created + %(msecs)d Millisecond portion of the creation time + %(relativeCreated)d Time in milliseconds when the LogRecord was created, + relative to the time the logging module was loaded + (typically at application startup time) + %(thread)d Thread ID (if available) + %(process)d Process ID (if available) + %(message)s The result of record.getMessage(), computed just as + the record is emitted + """ + + converter = time.localtime + + def __init__(self, fmt=None, datefmt=None): + """ + Initialize the formatter with specified format strings. + + Initialize the formatter either with the specified format string, or a + default as described above. Allow for specialized date formatting with + the optional datefmt argument (if omitted, you get the ISO8601 format). + """ + if fmt: + self._fmt = fmt + else: + self._fmt = "%(message)s" + self.datefmt = datefmt + + def formatTime(self, record, datefmt=None): + """ + Return the creation time of the specified LogRecord as formatted text. + + This method should be called from format() by a formatter which + wants to make use of a formatted time. 
This method can be overridden + in formatters to provide for any specific requirement, but the + basic behaviour is as follows: if datefmt (a string) is specified, + it is used with time.strftime() to format the creation time of the + record. Otherwise, the ISO8601 format is used. The resulting + string is returned. This function uses a user-configurable function + to convert the creation time to a tuple. By default, time.localtime() + is used; to change this for a particular formatter instance, set the + 'converter' attribute to a function with the same signature as + time.localtime() or time.gmtime(). To change it for all formatters, + for example if you want all logging times to be shown in GMT, + set the 'converter' attribute in the Formatter class. + """ + ct = self.converter(record.created) + if datefmt: + s = time.strftime(datefmt, ct) + else: + t = time.strftime("%Y-%m-%d %H:%M:%S", ct) + s = "%s,%03d" % (t, record.msecs) + return s + + def formatException(self, ei): + """ + Format and return the specified exception information as a string. + + This default implementation just uses + traceback.print_exception() + """ + import traceback + sio = cStringIO.StringIO() + traceback.print_exception(ei[0], ei[1], ei[2], None, sio) + s = sio.getvalue() + sio.close() + if s[-1] == "\n": + s = s[:-1] + return s + + def format(self, record): + """ + Format the specified record as text. + + The record's attribute dictionary is used as the operand to a + string formatting operation which yields the returned string. + Before formatting the dictionary, a couple of preparatory steps + are carried out. The message attribute of the record is computed + using LogRecord.getMessage(). If the formatting string contains + "%(asctime)", formatTime() is called to format the event time. + If there is exception information, it is formatted using + formatException() and appended to the message. + """ + record.message = record.getMessage() + if string.find(self._fmt,"%(asctime)") >= 0: + record.asctime = self.formatTime(record, self.datefmt) + s = self._fmt % record.__dict__ + if record.exc_info: + if s[-1] != "\n": + s = s + "\n" + s = s + self.formatException(record.exc_info) + return s + +# +# The default formatter to use when no other is specified +# +_defaultFormatter = Formatter() + +class BufferingFormatter: + """ + A formatter suitable for formatting a number of records. + """ + def __init__(self, linefmt=None): + """ + Optionally specify a formatter which will be used to format each + individual record. + """ + if linefmt: + self.linefmt = linefmt + else: + self.linefmt = _defaultFormatter + + def formatHeader(self, records): + """ + Return the header string for the specified records. + """ + return "" + + def formatFooter(self, records): + """ + Return the footer string for the specified records. + """ + return "" + + def format(self, records): + """ + Format the specified records and return the result as a string. + """ + rv = "" + if len(records) > 0: + rv = rv + self.formatHeader(records) + for record in records: + rv = rv + self.linefmt.format(record) + rv = rv + self.formatFooter(records) + return rv + +#--------------------------------------------------------------------------- +# Filter classes and functions +#--------------------------------------------------------------------------- + +class Filter: + """ + Filter instances are used to perform arbitrary filtering of LogRecords. + + Loggers and Handlers can optionally use Filter instances to filter + records as desired. 
The base filter class only allows events which are + below a certain point in the logger hierarchy. For example, a filter + initialized with "A.B" will allow events logged by loggers "A.B", + "A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If + initialized with the empty string, all events are passed. + """ + def __init__(self, name=''): + """ + Initialize a filter. + + Initialize with the name of the logger which, together with its + children, will have its events allowed through the filter. If no + name is specified, allow every event. + """ + self.name = name + self.nlen = len(name) + + def filter(self, record): + """ + Determine if the specified record is to be logged. + + Is the specified record to be logged? Returns 0 for no, nonzero for + yes. If deemed appropriate, the record may be modified in-place. + """ + if self.nlen == 0: + return 1 + elif self.name == record.name: + return 1 + elif string.find(record.name, self.name, 0, self.nlen) != 0: + return 0 + return (record.name[self.nlen] == ".") + +class Filterer: + """ + A base class for loggers and handlers which allows them to share + common code. + """ + def __init__(self): + """ + Initialize the list of filters to be an empty list. + """ + self.filters = [] + + def addFilter(self, filter): + """ + Add the specified filter to this handler. + """ + if not (filter in self.filters): + self.filters.append(filter) + + def removeFilter(self, filter): + """ + Remove the specified filter from this handler. + """ + if filter in self.filters: + self.filters.remove(filter) + + def filter(self, record): + """ + Determine if a record is loggable by consulting all the filters. + + The default is to allow the record to be logged; any filter can veto + this and the record is then dropped. Returns a zero value if a record + is to be dropped, else non-zero. + """ + rv = 1 + for f in self.filters: + if not f.filter(record): + rv = 0 + break + return rv + +#--------------------------------------------------------------------------- +# Handler classes and functions +#--------------------------------------------------------------------------- + +_handlers = {} #repository of handlers (for flushing when shutdown called) + +class Handler(Filterer): + """ + Handler instances dispatch logging events to specific destinations. + + The base handler class. Acts as a placeholder which defines the Handler + interface. Handlers can optionally use Formatter instances to format + records as desired. By default, no formatter is specified; in this case, + the 'raw' message as determined by record.message is logged. + """ + def __init__(self, level=NOTSET): + """ + Initializes the instance - basically setting the formatter to None + and the filter list to empty. + """ + Filterer.__init__(self) + self.level = level + self.formatter = None + #get the module data lock, as we're updating a shared structure. + _acquireLock() + try: #unlikely to raise an exception, but you never know... + _handlers[self] = 1 + finally: + _releaseLock() + self.createLock() + + def createLock(self): + """ + Acquire a thread lock for serializing access to the underlying I/O. + """ + if thread: + self.lock = thread.allocate_lock() + else: + self.lock = None + + def acquire(self): + """ + Acquire the I/O thread lock. + """ + if self.lock: + self.lock.acquire() + + def release(self): + """ + Release the I/O thread lock. + """ + if self.lock: + self.lock.release() + + def setLevel(self, level): + """ + Set the logging level of this handler. 
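+
+        For example (an informal sketch): after
+
+            handler.setLevel(ERROR)
+
+        the handler drops DEBUG, INFO and WARNING records even when its
+        logger passes them on.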
+ """ + self.level = level + + def format(self, record): + """ + Format the specified record. + + If a formatter is set, use it. Otherwise, use the default formatter + for the module. + """ + if self.formatter: + fmt = self.formatter + else: + fmt = _defaultFormatter + return fmt.format(record) + + def emit(self, record): + """ + Do whatever it takes to actually log the specified logging record. + + This version is intended to be implemented by subclasses and so + raises a NotImplementedError. + """ + raise NotImplementedError, 'emit must be implemented '\ + 'by Handler subclasses' + + def handle(self, record): + """ + Conditionally emit the specified logging record. + + Emission depends on filters which may have been added to the handler. + Wrap the actual emission of the record with acquisition/release of + the I/O thread lock. Returns whether the filter passed the record for + emission. + """ + rv = self.filter(record) + if rv: + self.acquire() + try: + self.emit(record) + finally: + self.release() + return rv + + def setFormatter(self, fmt): + """ + Set the formatter for this handler. + """ + self.formatter = fmt + + def flush(self): + """ + Ensure all logging output has been flushed. + + This version does nothing and is intended to be implemented by + subclasses. + """ + pass + + def close(self): + """ + Tidy up any resources used by the handler. + + This version does nothing and is intended to be implemented by + subclasses. + """ + pass + + def handleError(self, record): + """ + Handle errors which occur during an emit() call. + + This method should be called from handlers when an exception is + encountered during an emit() call. If raiseExceptions is false, + exceptions get silently ignored. This is what is mostly wanted + for a logging system - most users will not care about errors in + the logging system, they are more interested in application errors. + You could, however, replace this with a custom handler if you wish. + The record which was being processed is passed in to this method. + """ + if raiseExceptions: + import traceback + ei = sys.exc_info() + traceback.print_exception(ei[0], ei[1], ei[2], None, sys.stderr) + del ei + +class StreamHandler(Handler): + """ + A handler class which writes logging records, appropriately formatted, + to a stream. Note that this class does not close the stream, as + sys.stdout or sys.stderr may be used. + """ + def __init__(self, strm=None): + """ + Initialize the handler. + + If strm is not specified, sys.stderr is used. + """ + Handler.__init__(self) + if not strm: + strm = sys.stderr + self.stream = strm + self.formatter = None + + def flush(self): + """ + Flushes the stream. + """ + self.stream.flush() + + def emit(self, record): + """ + Emit a record. + + If a formatter is specified, it is used to format the record. + The record is then written to the stream with a trailing newline + [N.B. this may be removed depending on feedback]. If exception + information is present, it is formatted using + traceback.print_exception and appended to the stream. + """ + try: + msg = self.format(record) + if not hasattr(types, "UnicodeType"): #if no unicode support... + self.stream.write("%s\n" % msg) + else: + try: + self.stream.write("%s\n" % msg) + except UnicodeError: + self.stream.write("%s\n" % msg.encode("UTF-8")) + self.flush() + except: + self.handleError(record) + +class FileHandler(StreamHandler): + """ + A handler class which writes formatted logging records to disk files. 
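+
+    Typical use (an informal sketch; the path is a placeholder):
+
+        fh = FileHandler('/tmp/app.log')
+        fh.setFormatter(Formatter(BASIC_FORMAT))
+        getLogger('my.app').addHandler(fh)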
+ """ + def __init__(self, filename, mode="a"): + """ + Open the specified file and use it as the stream for logging. + """ + StreamHandler.__init__(self, open(filename, mode)) + self.baseFilename = filename + self.mode = mode + + def close(self): + """ + Closes the stream. + """ + self.stream.close() + +#--------------------------------------------------------------------------- +# Manager classes and functions +#--------------------------------------------------------------------------- + +class PlaceHolder: + """ + PlaceHolder instances are used in the Manager logger hierarchy to take + the place of nodes for which no loggers have been defined [FIXME add + example]. + """ + def __init__(self, alogger): + """ + Initialize with the specified logger being a child of this placeholder. + """ + self.loggers = [alogger] + + def append(self, alogger): + """ + Add the specified logger as a child of this placeholder. + """ + if alogger not in self.loggers: + self.loggers.append(alogger) + +# +# Determine which class to use when instantiating loggers. +# +_loggerClass = None + +def setLoggerClass(klass): + """ + Set the class to be used when instantiating a logger. The class should + define __init__() such that only a name argument is required, and the + __init__() should call Logger.__init__() + """ + if klass != Logger: + if not issubclass(klass, Logger): + raise TypeError, "logger not derived from logging.Logger: " + \ + klass.__name__ + global _loggerClass + _loggerClass = klass + +class Manager: + """ + There is [under normal circumstances] just one Manager instance, which + holds the hierarchy of loggers. + """ + def __init__(self, rootnode): + """ + Initialize the manager with the root node of the logger hierarchy. + """ + self.root = rootnode + self.disable = 0 + self.emittedNoHandlerWarning = 0 + self.loggerDict = {} + + def getLogger(self, name): + """ + Get a logger with the specified name (channel name), creating it + if it doesn't yet exist. + + If a PlaceHolder existed for the specified name [i.e. the logger + didn't exist but a child of it did], replace it with the created + logger and fix up the parent/child references which pointed to the + placeholder to now point to the logger. + """ + rv = None + _acquireLock() + try: + if self.loggerDict.has_key(name): + rv = self.loggerDict[name] + if isinstance(rv, PlaceHolder): + ph = rv + rv = _loggerClass(name) + rv.manager = self + self.loggerDict[name] = rv + self._fixupChildren(ph, rv) + self._fixupParents(rv) + else: + rv = _loggerClass(name) + rv.manager = self + self.loggerDict[name] = rv + self._fixupParents(rv) + finally: + _releaseLock() + return rv + + def _fixupParents(self, alogger): + """ + Ensure that there are either loggers or placeholders all the way + from the specified logger to the root of the logger hierarchy. + """ + name = alogger.name + i = string.rfind(name, ".") + rv = None + while (i > 0) and not rv: + substr = name[:i] + if not self.loggerDict.has_key(substr): + self.loggerDict[substr] = PlaceHolder(alogger) + else: + obj = self.loggerDict[substr] + if isinstance(obj, Logger): + rv = obj + else: + assert isinstance(obj, PlaceHolder) + obj.append(alogger) + i = string.rfind(name, ".", 0, i - 1) + if not rv: + rv = self.root + alogger.parent = rv + + def _fixupChildren(self, ph, alogger): + """ + Ensure that children of the placeholder ph are connected to the + specified logger. 
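+
+        For example (an informal sketch):
+
+            c = getLogger('a.b.c')   # 'a.b' becomes a PlaceHolder
+            b = getLogger('a.b')     # c is re-parented: c.parent is b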
+ """ + for c in ph.loggers: + if string.find(c.parent.name, alogger.name) <> 0: + alogger.parent = c.parent + c.parent = alogger + +#--------------------------------------------------------------------------- +# Logger classes and functions +#--------------------------------------------------------------------------- + +class Logger(Filterer): + """ + Instances of the Logger class represent a single logging channel. A + "logging channel" indicates an area of an application. Exactly how an + "area" is defined is up to the application developer. Since an + application can have any number of areas, logging channels are identified + by a unique string. Application areas can be nested (e.g. an area + of "input processing" might include sub-areas "read CSV files", "read + XLS files" and "read Gnumeric files"). To cater for this natural nesting, + channel names are organized into a namespace hierarchy where levels are + separated by periods, much like the Java or Python package namespace. So + in the instance given above, channel names might be "input" for the upper + level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels. + There is no arbitrary limit to the depth of nesting. + """ + def __init__(self, name, level=NOTSET): + """ + Initialize the logger with a name and an optional level. + """ + Filterer.__init__(self) + self.name = name + self.level = level + self.parent = None + self.propagate = 1 + self.handlers = [] + self.disabled = 0 + + def setLevel(self, level): + """ + Set the logging level of this logger. + """ + self.level = level + +# def getRoot(self): +# """ +# Get the root of the logger hierarchy. +# """ +# return Logger.root + + def debug(self, msg, *args, **kwargs): + """ + Log 'msg % args' with severity 'DEBUG'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.debug("Houston, we have a %s", "thorny problem", exc_info=1) + """ + if self.manager.disable >= DEBUG: + return + if DEBUG >= self.getEffectiveLevel(): + apply(self._log, (DEBUG, msg, args), kwargs) + + def info(self, msg, *args, **kwargs): + """ + Log 'msg % args' with severity 'INFO'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.info("Houston, we have a %s", "interesting problem", exc_info=1) + """ + if self.manager.disable >= INFO: + return + if INFO >= self.getEffectiveLevel(): + apply(self._log, (INFO, msg, args), kwargs) + + def warning(self, msg, *args, **kwargs): + """ + Log 'msg % args' with severity 'WARNING'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1) + """ + if self.manager.disable >= WARNING: + return + if self.isEnabledFor(WARNING): + apply(self._log, (WARNING, msg, args), kwargs) + + warn = warning + + def error(self, msg, *args, **kwargs): + """ + Log 'msg % args' with severity 'ERROR'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.error("Houston, we have a %s", "major problem", exc_info=1) + """ + if self.manager.disable >= ERROR: + return + if self.isEnabledFor(ERROR): + apply(self._log, (ERROR, msg, args), kwargs) + + def exception(self, msg, *args): + """ + Convenience method for logging an ERROR with exception information. + """ + apply(self.error, (msg,) + args, {'exc_info': 1}) + + def critical(self, msg, *args, **kwargs): + """ + Log 'msg % args' with severity 'CRITICAL'. 
+ + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.critical("Houston, we have a %s", "major disaster", exc_info=1) + """ + if self.manager.disable >= CRITICAL: + return + if CRITICAL >= self.getEffectiveLevel(): + apply(self._log, (CRITICAL, msg, args), kwargs) + + fatal = critical + + def log(self, level, msg, *args, **kwargs): + """ + Log 'msg % args' with the severity 'level'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.log(level, "We have a %s", "mysterious problem", exc_info=1) + """ + if self.manager.disable >= level: + return + if self.isEnabledFor(level): + apply(self._log, (level, msg, args), kwargs) + + def findCaller(self): + """ + Find the stack frame of the caller so that we can note the source + file name and line number. + """ + f = sys._getframe(1) + while 1: + co = f.f_code + filename = os.path.normcase(co.co_filename) + if filename == _srcfile: + f = f.f_back + continue + return filename, f.f_lineno + + def makeRecord(self, name, level, fn, lno, msg, args, exc_info): + """ + A factory method which can be overridden in subclasses to create + specialized LogRecords. + """ + return LogRecord(name, level, fn, lno, msg, args, exc_info) + + def _log(self, level, msg, args, exc_info=None): + """ + Low-level logging routine which creates a LogRecord and then calls + all the handlers of this logger to handle the record. + """ + if _srcfile: + fn, lno = self.findCaller() + else: + fn, lno = "", 0 + if exc_info: + exc_info = sys.exc_info() + record = self.makeRecord(self.name, level, fn, lno, msg, args, exc_info) + self.handle(record) + + def handle(self, record): + """ + Call the handlers for the specified record. + + This method is used for unpickled records received from a socket, as + well as those created locally. Logger-level filtering is applied. + """ + if (not self.disabled) and self.filter(record): + self.callHandlers(record) + + def addHandler(self, hdlr): + """ + Add the specified handler to this logger. + """ + if not (hdlr in self.handlers): + self.handlers.append(hdlr) + + def removeHandler(self, hdlr): + """ + Remove the specified handler from this logger. + """ + if hdlr in self.handlers: + #hdlr.close() + self.handlers.remove(hdlr) + + def callHandlers(self, record): + """ + Pass a record to all relevant handlers. + + Loop through all handlers for this logger and its parents in the + logger hierarchy. If no handler was found, output a one-off error + message to sys.stderr. Stop searching up the hierarchy whenever a + logger with the "propagate" attribute set to zero is found - that + will be the last logger whose handlers are called. + """ + c = self + found = 0 + while c: + for hdlr in c.handlers: + found = found + 1 + if record.levelno >= hdlr.level: + hdlr.handle(record) + if not c.propagate: + c = None #break out + else: + c = c.parent + if (found == 0) and not self.manager.emittedNoHandlerWarning: + sys.stderr.write("No handlers could be found for logger" + " \"%s\"\n" % self.name) + self.manager.emittedNoHandlerWarning = 1 + + def getEffectiveLevel(self): + """ + Get the effective level for this logger. + + Loop through this logger and its parents in the logger hierarchy, + looking for a non-zero logging level. Return the first one found. + """ + logger = self + while logger: + if logger.level: + return logger.level + logger = logger.parent + return NOTSET + + def isEnabledFor(self, level): + """ + Is this logger enabled for level 'level'? 
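+
+        A common guard (an informal sketch; build_report is hypothetical):
+
+            if logger.isEnabledFor(DEBUG):
+                logger.debug('state: %s', build_report())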
+ """ + if self.manager.disable >= level: + return 0 + return level >= self.getEffectiveLevel() + +class RootLogger(Logger): + """ + A root logger is not that different to any other logger, except that + it must have a logging level and there is only one instance of it in + the hierarchy. + """ + def __init__(self, level): + """ + Initialize the logger with the name "root". + """ + Logger.__init__(self, "root", level) + +_loggerClass = Logger + +root = RootLogger(WARNING) +Logger.root = root +Logger.manager = Manager(Logger.root) + +#--------------------------------------------------------------------------- +# Configuration classes and functions +#--------------------------------------------------------------------------- + +BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s" + +def basicConfig(): + """ + Do basic configuration for the logging system by creating a + StreamHandler with a default Formatter and adding it to the + root logger. + """ + if len(root.handlers) == 0: + hdlr = StreamHandler() + fmt = Formatter(BASIC_FORMAT) + hdlr.setFormatter(fmt) + root.addHandler(hdlr) + +#--------------------------------------------------------------------------- +# Utility functions at module level. +# Basically delegate everything to the root logger. +#--------------------------------------------------------------------------- + +def getLogger(name=None): + """ + Return a logger with the specified name, creating it if necessary. + + If no name is specified, return the root logger. + """ + if name: + return Logger.manager.getLogger(name) + else: + return root + +#def getRootLogger(): +# """ +# Return the root logger. +# +# Note that getLogger('') now does the same thing, so this function is +# deprecated and may disappear in the future. +# """ +# return root + +def critical(msg, *args, **kwargs): + """ + Log a message with severity 'CRITICAL' on the root logger. + """ + if len(root.handlers) == 0: + basicConfig() + apply(root.critical, (msg,)+args, kwargs) + +fatal = critical + +def error(msg, *args, **kwargs): + """ + Log a message with severity 'ERROR' on the root logger. + """ + if len(root.handlers) == 0: + basicConfig() + apply(root.error, (msg,)+args, kwargs) + +def exception(msg, *args): + """ + Log a message with severity 'ERROR' on the root logger, + with exception information. + """ + apply(error, (msg,)+args, {'exc_info': 1}) + +def warning(msg, *args, **kwargs): + """ + Log a message with severity 'WARNING' on the root logger. + """ + if len(root.handlers) == 0: + basicConfig() + apply(root.warning, (msg,)+args, kwargs) + +warn = warning + +def info(msg, *args, **kwargs): + """ + Log a message with severity 'INFO' on the root logger. + """ + if len(root.handlers) == 0: + basicConfig() + apply(root.info, (msg,)+args, kwargs) + +def debug(msg, *args, **kwargs): + """ + Log a message with severity 'DEBUG' on the root logger. + """ + if len(root.handlers) == 0: + basicConfig() + apply(root.debug, (msg,)+args, kwargs) + +def disable(level): + """ + Disable all logging calls less severe than 'level'. + """ + root.manager.disable = level + +def shutdown(): + """ + Perform any cleanup actions in the logging system (e.g. flushing + buffers). + + Should be called at application exit. 
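+
+    For example (an informal sketch):
+
+        import atexit
+        atexit.register(shutdown)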
+ """ + for h in _handlers.keys(): + h.flush() + h.close() diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/planet/compat_logging/config.py b/DJAGEN/tags/djagen_old/djagen/gezegen/planet/compat_logging/config.py new file mode 100755 index 0000000..d4d08f0 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/gezegen/planet/compat_logging/config.py @@ -0,0 +1,299 @@ +# Copyright 2001-2002 by Vinay Sajip. All Rights Reserved. +# +# Permission to use, copy, modify, and distribute this software and its +# documentation for any purpose and without fee is hereby granted, +# provided that the above copyright notice appear in all copies and that +# both that copyright notice and this permission notice appear in +# supporting documentation, and that the name of Vinay Sajip +# not be used in advertising or publicity pertaining to distribution +# of the software without specific, written prior permission. +# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL +# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER +# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +""" +Logging package for Python. Based on PEP 282 and comments thereto in +comp.lang.python, and influenced by Apache's log4j system. + +Should work under Python versions >= 1.5.2, except that source line +information is not available unless 'inspect' is. + +Copyright (C) 2001-2002 Vinay Sajip. All Rights Reserved. + +To use, simply 'import logging' and log away! +""" + +import sys, logging, logging.handlers, string, thread, threading, socket, struct, os + +from SocketServer import ThreadingTCPServer, StreamRequestHandler + + +DEFAULT_LOGGING_CONFIG_PORT = 9030 +if sys.platform == "win32": + RESET_ERROR = 10054 #WSAECONNRESET +else: + RESET_ERROR = 104 #ECONNRESET + +# +# The following code implements a socket listener for on-the-fly +# reconfiguration of logging. +# +# _listener holds the server object doing the listening +_listener = None + +def fileConfig(fname, defaults=None): + """ + Read the logging configuration from a ConfigParser-format file. + + This can be called several times from an application, allowing an end user + the ability to select from various pre-canned configurations (if the + developer provides a mechanism to present the choices and load the chosen + configuration). + In versions of ConfigParser which have the readfp method [typically + shipped in 2.x versions of Python], you can pass in a file-like object + rather than a filename, in which case the file-like object will be read + using readfp. + """ + import ConfigParser + + cp = ConfigParser.ConfigParser(defaults) + if hasattr(cp, 'readfp') and hasattr(fname, 'readline'): + cp.readfp(fname) + else: + cp.read(fname) + #first, do the formatters... + flist = cp.get("formatters", "keys") + if len(flist): + flist = string.split(flist, ",") + formatters = {} + for form in flist: + sectname = "formatter_%s" % form + opts = cp.options(sectname) + if "format" in opts: + fs = cp.get(sectname, "format", 1) + else: + fs = None + if "datefmt" in opts: + dfs = cp.get(sectname, "datefmt", 1) + else: + dfs = None + f = logging.Formatter(fs, dfs) + formatters[form] = f + #next, do the handlers... + #critical section... 
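+    # For reference, a minimal file in the format parsed here might look
+    # like this (an informal sketch, not shipped with the module):
+    #
+    #   [formatters]
+    #   keys=simple
+    #   [formatter_simple]
+    #   format=%(levelname)s %(name)s %(message)s
+    #
+    #   [handlers]
+    #   keys=console
+    #   [handler_console]
+    #   class=StreamHandler
+    #   args=(sys.stderr,)
+    #   formatter=simple
+    #
+    #   [loggers]
+    #   keys=root
+    #   [logger_root]
+    #   level=DEBUG
+    #   handlers=console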
+ logging._acquireLock() + try: + try: + #first, lose the existing handlers... + logging._handlers.clear() + #now set up the new ones... + hlist = cp.get("handlers", "keys") + if len(hlist): + hlist = string.split(hlist, ",") + handlers = {} + fixups = [] #for inter-handler references + for hand in hlist: + sectname = "handler_%s" % hand + klass = cp.get(sectname, "class") + opts = cp.options(sectname) + if "formatter" in opts: + fmt = cp.get(sectname, "formatter") + else: + fmt = "" + klass = eval(klass, vars(logging)) + args = cp.get(sectname, "args") + args = eval(args, vars(logging)) + h = apply(klass, args) + if "level" in opts: + level = cp.get(sectname, "level") + h.setLevel(logging._levelNames[level]) + if len(fmt): + h.setFormatter(formatters[fmt]) + #temporary hack for FileHandler and MemoryHandler. + if klass == logging.handlers.MemoryHandler: + if "target" in opts: + target = cp.get(sectname,"target") + else: + target = "" + if len(target): #the target handler may not be loaded yet, so keep for later... + fixups.append((h, target)) + handlers[hand] = h + #now all handlers are loaded, fixup inter-handler references... + for fixup in fixups: + h = fixup[0] + t = fixup[1] + h.setTarget(handlers[t]) + #at last, the loggers...first the root... + llist = cp.get("loggers", "keys") + llist = string.split(llist, ",") + llist.remove("root") + sectname = "logger_root" + root = logging.root + log = root + opts = cp.options(sectname) + if "level" in opts: + level = cp.get(sectname, "level") + log.setLevel(logging._levelNames[level]) + for h in root.handlers[:]: + root.removeHandler(h) + hlist = cp.get(sectname, "handlers") + if len(hlist): + hlist = string.split(hlist, ",") + for hand in hlist: + log.addHandler(handlers[hand]) + #and now the others... + #we don't want to lose the existing loggers, + #since other threads may have pointers to them. + #existing is set to contain all existing loggers, + #and as we go through the new configuration we + #remove any which are configured. At the end, + #what's left in existing is the set of loggers + #which were in the previous configuration but + #which are not in the new configuration. + existing = root.manager.loggerDict.keys() + #now set up the new ones... + for log in llist: + sectname = "logger_%s" % log + qn = cp.get(sectname, "qualname") + opts = cp.options(sectname) + if "propagate" in opts: + propagate = cp.getint(sectname, "propagate") + else: + propagate = 1 + logger = logging.getLogger(qn) + if qn in existing: + existing.remove(qn) + if "level" in opts: + level = cp.get(sectname, "level") + logger.setLevel(logging._levelNames[level]) + for h in logger.handlers[:]: + logger.removeHandler(h) + logger.propagate = propagate + logger.disabled = 0 + hlist = cp.get(sectname, "handlers") + if len(hlist): + hlist = string.split(hlist, ",") + for hand in hlist: + logger.addHandler(handlers[hand]) + #Disable any old loggers. There's no point deleting + #them as other threads may continue to hold references + #and by disabling them, you stop them doing any logging. + for log in existing: + root.manager.loggerDict[log].disabled = 1 + except: + import traceback + ei = sys.exc_info() + traceback.print_exception(ei[0], ei[1], ei[2], None, sys.stderr) + del ei + finally: + logging._releaseLock() + +def listen(port=DEFAULT_LOGGING_CONFIG_PORT): + """ + Start up a socket server on the specified port, and listen for new + configurations. + + These will be sent as a file suitable for processing by fileConfig(). 
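+
+    Typical use, given the return value described below (an informal
+    sketch):
+
+        t = listen(DEFAULT_LOGGING_CONFIG_PORT)
+        t.start()
+        # ... later, to shut the server down:
+        stopListening()
+        t.join()
+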
+ Returns a Thread object on which you can call start() to start the server, + and which you can join() when appropriate. To stop the server, call + stopListening(). + """ + if not thread: + raise NotImplementedError, "listen() needs threading to work" + + class ConfigStreamHandler(StreamRequestHandler): + """ + Handler for a logging configuration request. + + It expects a completely new logging configuration and uses fileConfig + to install it. + """ + def handle(self): + """ + Handle a request. + + Each request is expected to be a 4-byte length, + followed by the config file. Uses fileConfig() to do the + grunt work. + """ + import tempfile + try: + conn = self.connection + chunk = conn.recv(4) + if len(chunk) == 4: + slen = struct.unpack(">L", chunk)[0] + chunk = self.connection.recv(slen) + while len(chunk) < slen: + chunk = chunk + conn.recv(slen - len(chunk)) + #Apply new configuration. We'd like to be able to + #create a StringIO and pass that in, but unfortunately + #1.5.2 ConfigParser does not support reading file + #objects, only actual files. So we create a temporary + #file and remove it later. + file = tempfile.mktemp(".ini") + f = open(file, "w") + f.write(chunk) + f.close() + fileConfig(file) + os.remove(file) + except socket.error, e: + if type(e.args) != types.TupleType: + raise + else: + errcode = e.args[0] + if errcode != RESET_ERROR: + raise + + class ConfigSocketReceiver(ThreadingTCPServer): + """ + A simple TCP socket-based logging config receiver. + """ + + allow_reuse_address = 1 + + def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT, + handler=None): + ThreadingTCPServer.__init__(self, (host, port), handler) + logging._acquireLock() + self.abort = 0 + logging._releaseLock() + self.timeout = 1 + + def serve_until_stopped(self): + import select + abort = 0 + while not abort: + rd, wr, ex = select.select([self.socket.fileno()], + [], [], + self.timeout) + if rd: + self.handle_request() + logging._acquireLock() + abort = self.abort + logging._releaseLock() + + def serve(rcvr, hdlr, port): + server = rcvr(port=port, handler=hdlr) + global _listener + logging._acquireLock() + _listener = server + logging._releaseLock() + server.serve_until_stopped() + + return threading.Thread(target=serve, + args=(ConfigSocketReceiver, + ConfigStreamHandler, port)) + +def stopListening(): + """ + Stop the listening server which was created with a call to listen(). + """ + global _listener + if _listener: + logging._acquireLock() + _listener.abort = 1 + _listener = None + logging._releaseLock() diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/planet/compat_logging/handlers.py b/DJAGEN/tags/djagen_old/djagen/gezegen/planet/compat_logging/handlers.py new file mode 100755 index 0000000..26ca8ad --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/gezegen/planet/compat_logging/handlers.py @@ -0,0 +1,728 @@ +# Copyright 2001-2002 by Vinay Sajip. All Rights Reserved. +# +# Permission to use, copy, modify, and distribute this software and its +# documentation for any purpose and without fee is hereby granted, +# provided that the above copyright notice appear in all copies and that +# both that copyright notice and this permission notice appear in +# supporting documentation, and that the name of Vinay Sajip +# not be used in advertising or publicity pertaining to distribution +# of the software without specific, written prior permission. +# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL +# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER +# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +""" +Logging package for Python. Based on PEP 282 and comments thereto in +comp.lang.python, and influenced by Apache's log4j system. + +Should work under Python versions >= 1.5.2, except that source line +information is not available unless 'inspect' is. + +Copyright (C) 2001-2002 Vinay Sajip. All Rights Reserved. + +To use, simply 'import logging' and log away! +""" + +import sys, logging, socket, types, os, string, cPickle, struct, time + +from SocketServer import ThreadingTCPServer, StreamRequestHandler + +# +# Some constants... +# + +DEFAULT_TCP_LOGGING_PORT = 9020 +DEFAULT_UDP_LOGGING_PORT = 9021 +DEFAULT_HTTP_LOGGING_PORT = 9022 +DEFAULT_SOAP_LOGGING_PORT = 9023 +SYSLOG_UDP_PORT = 514 + + +class RotatingFileHandler(logging.FileHandler): + def __init__(self, filename, mode="a", maxBytes=0, backupCount=0): + """ + Open the specified file and use it as the stream for logging. + + By default, the file grows indefinitely. You can specify particular + values of maxBytes and backupCount to allow the file to rollover at + a predetermined size. + + Rollover occurs whenever the current log file is nearly maxBytes in + length. If backupCount is >= 1, the system will successively create + new files with the same pathname as the base file, but with extensions + ".1", ".2" etc. appended to it. For example, with a backupCount of 5 + and a base file name of "app.log", you would get "app.log", + "app.log.1", "app.log.2", ... through to "app.log.5". The file being + written to is always "app.log" - when it gets filled up, it is closed + and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc. + exist, then they are renamed to "app.log.2", "app.log.3" etc. + respectively. + + If maxBytes is zero, rollover never occurs. + """ + logging.FileHandler.__init__(self, filename, mode) + self.maxBytes = maxBytes + self.backupCount = backupCount + if maxBytes > 0: + self.mode = "a" + + def doRollover(self): + """ + Do a rollover, as described in __init__(). + """ + + self.stream.close() + if self.backupCount > 0: + for i in range(self.backupCount - 1, 0, -1): + sfn = "%s.%d" % (self.baseFilename, i) + dfn = "%s.%d" % (self.baseFilename, i + 1) + if os.path.exists(sfn): + #print "%s -> %s" % (sfn, dfn) + if os.path.exists(dfn): + os.remove(dfn) + os.rename(sfn, dfn) + dfn = self.baseFilename + ".1" + if os.path.exists(dfn): + os.remove(dfn) + os.rename(self.baseFilename, dfn) + #print "%s -> %s" % (self.baseFilename, dfn) + self.stream = open(self.baseFilename, "w") + + def emit(self, record): + """ + Emit a record. + + Output the record to the file, catering for rollover as described + in doRollover(). + """ + if self.maxBytes > 0: # are we rolling over? + msg = "%s\n" % self.format(record) + self.stream.seek(0, 2) #due to non-posix-compliant Windows feature + if self.stream.tell() + len(msg) >= self.maxBytes: + self.doRollover() + logging.FileHandler.emit(self, record) + + +class SocketHandler(logging.Handler): + """ + A handler class which writes logging records, in pickle format, to + a streaming socket. The socket is kept open across logging calls. + If the peer resets it, an attempt is made to reconnect on the next call. 
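+
+    Typical wiring (an informal sketch; host and port are placeholders):
+
+        sh = SocketHandler('localhost', DEFAULT_TCP_LOGGING_PORT)
+        logging.getLogger('my.app').addHandler(sh)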
+ The pickle which is sent is that of the LogRecord's attribute dictionary + (__dict__), so that the receiver does not need to have the logging module + installed in order to process the logging event. + + To unpickle the record at the receiving end into a LogRecord, use the + makeLogRecord function. + """ + + def __init__(self, host, port): + """ + Initializes the handler with a specific host address and port. + + The attribute 'closeOnError' is set to 1 - which means that if + a socket error occurs, the socket is silently closed and then + reopened on the next logging call. + """ + logging.Handler.__init__(self) + self.host = host + self.port = port + self.sock = None + self.closeOnError = 0 + + def makeSocket(self): + """ + A factory method which allows subclasses to define the precise + type of socket they want. + """ + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.connect((self.host, self.port)) + return s + + def send(self, s): + """ + Send a pickled string to the socket. + + This function allows for partial sends which can happen when the + network is busy. + """ + if hasattr(self.sock, "sendall"): + self.sock.sendall(s) + else: + sentsofar = 0 + left = len(s) + while left > 0: + sent = self.sock.send(s[sentsofar:]) + sentsofar = sentsofar + sent + left = left - sent + + def makePickle(self, record): + """ + Pickles the record in binary format with a length prefix, and + returns it ready for transmission across the socket. + """ + s = cPickle.dumps(record.__dict__, 1) + #n = len(s) + #slen = "%c%c" % ((n >> 8) & 0xFF, n & 0xFF) + slen = struct.pack(">L", len(s)) + return slen + s + + def handleError(self, record): + """ + Handle an error during logging. + + An error has occurred during logging. Most likely cause - + connection lost. Close the socket so that we can retry on the + next event. + """ + if self.closeOnError and self.sock: + self.sock.close() + self.sock = None #try to reconnect next time + else: + logging.Handler.handleError(self, record) + + def emit(self, record): + """ + Emit a record. + + Pickles the record and writes it to the socket in binary format. + If there is an error with the socket, silently drop the packet. + If there was a problem with the socket, re-establishes the + socket. + """ + try: + s = self.makePickle(record) + if not self.sock: + self.sock = self.makeSocket() + self.send(s) + except: + self.handleError(record) + + def close(self): + """ + Closes the socket. + """ + if self.sock: + self.sock.close() + self.sock = None + +class DatagramHandler(SocketHandler): + """ + A handler class which writes logging records, in pickle format, to + a datagram socket. The pickle which is sent is that of the LogRecord's + attribute dictionary (__dict__), so that the receiver does not need to + have the logging module installed in order to process the logging event. + + To unpickle the record at the receiving end into a LogRecord, use the + makeLogRecord function. + + """ + def __init__(self, host, port): + """ + Initializes the handler with a specific host address and port. + """ + SocketHandler.__init__(self, host, port) + self.closeOnError = 0 + + def makeSocket(self): + """ + The factory method of SocketHandler is here overridden to create + a UDP socket (SOCK_DGRAM). + """ + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + return s + + def send(self, s): + """ + Send a pickled string to a socket. 
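+
+        A matching receiver can rebuild the record (an informal sketch,
+        assuming the 4-byte length prefix added by makePickle; sock is a
+        hypothetical bound UDP socket):
+
+            import cPickle, struct
+            data, addr = sock.recvfrom(65535)
+            slen = struct.unpack('>L', data[:4])[0]
+            record = logging.makeLogRecord(cPickle.loads(data[4:4 + slen]))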
+ + This function no longer allows for partial sends which can happen + when the network is busy - UDP does not guarantee delivery and + can deliver packets out of sequence. + """ + self.sock.sendto(s, (self.host, self.port)) + +class SysLogHandler(logging.Handler): + """ + A handler class which sends formatted logging records to a syslog + server. Based on Sam Rushing's syslog module: + http://www.nightmare.com/squirl/python-ext/misc/syslog.py + Contributed by Nicolas Untz (after which minor refactoring changes + have been made). + """ + + # from : + # ====================================================================== + # priorities/facilities are encoded into a single 32-bit quantity, where + # the bottom 3 bits are the priority (0-7) and the top 28 bits are the + # facility (0-big number). Both the priorities and the facilities map + # roughly one-to-one to strings in the syslogd(8) source code. This + # mapping is included in this file. + # + # priorities (these are ordered) + + LOG_EMERG = 0 # system is unusable + LOG_ALERT = 1 # action must be taken immediately + LOG_CRIT = 2 # critical conditions + LOG_ERR = 3 # error conditions + LOG_WARNING = 4 # warning conditions + LOG_NOTICE = 5 # normal but significant condition + LOG_INFO = 6 # informational + LOG_DEBUG = 7 # debug-level messages + + # facility codes + LOG_KERN = 0 # kernel messages + LOG_USER = 1 # random user-level messages + LOG_MAIL = 2 # mail system + LOG_DAEMON = 3 # system daemons + LOG_AUTH = 4 # security/authorization messages + LOG_SYSLOG = 5 # messages generated internally by syslogd + LOG_LPR = 6 # line printer subsystem + LOG_NEWS = 7 # network news subsystem + LOG_UUCP = 8 # UUCP subsystem + LOG_CRON = 9 # clock daemon + LOG_AUTHPRIV = 10 # security/authorization messages (private) + + # other codes through 15 reserved for system use + LOG_LOCAL0 = 16 # reserved for local use + LOG_LOCAL1 = 17 # reserved for local use + LOG_LOCAL2 = 18 # reserved for local use + LOG_LOCAL3 = 19 # reserved for local use + LOG_LOCAL4 = 20 # reserved for local use + LOG_LOCAL5 = 21 # reserved for local use + LOG_LOCAL6 = 22 # reserved for local use + LOG_LOCAL7 = 23 # reserved for local use + + priority_names = { + "alert": LOG_ALERT, + "crit": LOG_CRIT, + "critical": LOG_CRIT, + "debug": LOG_DEBUG, + "emerg": LOG_EMERG, + "err": LOG_ERR, + "error": LOG_ERR, # DEPRECATED + "info": LOG_INFO, + "notice": LOG_NOTICE, + "panic": LOG_EMERG, # DEPRECATED + "warn": LOG_WARNING, # DEPRECATED + "warning": LOG_WARNING, + } + + facility_names = { + "auth": LOG_AUTH, + "authpriv": LOG_AUTHPRIV, + "cron": LOG_CRON, + "daemon": LOG_DAEMON, + "kern": LOG_KERN, + "lpr": LOG_LPR, + "mail": LOG_MAIL, + "news": LOG_NEWS, + "security": LOG_AUTH, # DEPRECATED + "syslog": LOG_SYSLOG, + "user": LOG_USER, + "uucp": LOG_UUCP, + "local0": LOG_LOCAL0, + "local1": LOG_LOCAL1, + "local2": LOG_LOCAL2, + "local3": LOG_LOCAL3, + "local4": LOG_LOCAL4, + "local5": LOG_LOCAL5, + "local6": LOG_LOCAL6, + "local7": LOG_LOCAL7, + } + + def __init__(self, address=('localhost', SYSLOG_UDP_PORT), facility=LOG_USER): + """ + Initialize a handler. + + If address is specified as a string, UNIX socket is used. + If facility is not specified, LOG_USER is used. 
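+
+        For example (an informal sketch; '/dev/log' is the usual Linux
+        syslog socket):
+
+            SysLogHandler(address='/dev/log',
+                          facility=SysLogHandler.LOG_DAEMON)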
+ """ + logging.Handler.__init__(self) + + self.address = address + self.facility = facility + if type(address) == types.StringType: + self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) + # syslog may require either DGRAM or STREAM sockets + try: + self.socket.connect(address) + except socket.error: + self.socket.close() + self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + self.socket.connect(address) + self.unixsocket = 1 + else: + self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + self.unixsocket = 0 + + self.formatter = None + + # curious: when talking to the unix-domain '/dev/log' socket, a + # zero-terminator seems to be required. this string is placed + # into a class variable so that it can be overridden if + # necessary. + log_format_string = '<%d>%s\000' + + def encodePriority (self, facility, priority): + """ + Encode the facility and priority. You can pass in strings or + integers - if strings are passed, the facility_names and + priority_names mapping dictionaries are used to convert them to + integers. + """ + if type(facility) == types.StringType: + facility = self.facility_names[facility] + if type(priority) == types.StringType: + priority = self.priority_names[priority] + return (facility << 3) | priority + + def close (self): + """ + Closes the socket. + """ + if self.unixsocket: + self.socket.close() + + def emit(self, record): + """ + Emit a record. + + The record is formatted, and then sent to the syslog server. If + exception information is present, it is NOT sent to the server. + """ + msg = self.format(record) + """ + We need to convert record level to lowercase, maybe this will + change in the future. + """ + msg = self.log_format_string % ( + self.encodePriority(self.facility, + string.lower(record.levelname)), + msg) + try: + if self.unixsocket: + self.socket.send(msg) + else: + self.socket.sendto(msg, self.address) + except: + self.handleError(record) + +class SMTPHandler(logging.Handler): + """ + A handler class which sends an SMTP email for each logging event. + """ + def __init__(self, mailhost, fromaddr, toaddrs, subject): + """ + Initialize the handler. + + Initialize the instance with the from and to addresses and subject + line of the email. To specify a non-standard SMTP port, use the + (host, port) tuple format for the mailhost argument. + """ + logging.Handler.__init__(self) + if type(mailhost) == types.TupleType: + host, port = mailhost + self.mailhost = host + self.mailport = port + else: + self.mailhost = mailhost + self.mailport = None + self.fromaddr = fromaddr + if type(toaddrs) == types.StringType: + toaddrs = [toaddrs] + self.toaddrs = toaddrs + self.subject = subject + + def getSubject(self, record): + """ + Determine the subject for the email. + + If you want to specify a subject line which is record-dependent, + override this method. + """ + return self.subject + + weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] + + monthname = [None, + 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', + 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] + + def date_time(self): + """Return the current date and time formatted for a MIME header.""" + year, month, day, hh, mm, ss, wd, y, z = time.gmtime(time.time()) + s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % ( + self.weekdayname[wd], + day, self.monthname[month], year, + hh, mm, ss) + return s + + def emit(self, record): + """ + Emit a record. + + Format the record and send it to the specified addressees. 
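+
+        For example (an informal sketch; the addresses are placeholders),
+        a handler built as
+
+            SMTPHandler(('mail.example.com', 25), 'app@example.com',
+                        ['ops@example.com'], 'Application error')
+
+        mails each record it handles to ops@example.com.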
+ """ + try: + import smtplib + port = self.mailport + if not port: + port = smtplib.SMTP_PORT + smtp = smtplib.SMTP(self.mailhost, port) + msg = self.format(record) + msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % ( + self.fromaddr, + string.join(self.toaddrs, ","), + self.getSubject(record), + self.date_time(), msg) + smtp.sendmail(self.fromaddr, self.toaddrs, msg) + smtp.quit() + except: + self.handleError(record) + +class NTEventLogHandler(logging.Handler): + """ + A handler class which sends events to the NT Event Log. Adds a + registry entry for the specified application name. If no dllname is + provided, win32service.pyd (which contains some basic message + placeholders) is used. Note that use of these placeholders will make + your event logs big, as the entire message source is held in the log. + If you want slimmer logs, you have to pass in the name of your own DLL + which contains the message definitions you want to use in the event log. + """ + def __init__(self, appname, dllname=None, logtype="Application"): + logging.Handler.__init__(self) + try: + import win32evtlogutil, win32evtlog + self.appname = appname + self._welu = win32evtlogutil + if not dllname: + dllname = os.path.split(self._welu.__file__) + dllname = os.path.split(dllname[0]) + dllname = os.path.join(dllname[0], r'win32service.pyd') + self.dllname = dllname + self.logtype = logtype + self._welu.AddSourceToRegistry(appname, dllname, logtype) + self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE + self.typemap = { + logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE, + logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE, + logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE, + logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE, + logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE, + } + except ImportError: + print "The Python Win32 extensions for NT (service, event "\ + "logging) appear not to be available." + self._welu = None + + def getMessageID(self, record): + """ + Return the message ID for the event record. If you are using your + own messages, you could do this by having the msg passed to the + logger being an ID rather than a formatting string. Then, in here, + you could use a dictionary lookup to get the message ID. This + version returns 1, which is the base message ID in win32service.pyd. + """ + return 1 + + def getEventCategory(self, record): + """ + Return the event category for the record. + + Override this if you want to specify your own categories. This version + returns 0. + """ + return 0 + + def getEventType(self, record): + """ + Return the event type for the record. + + Override this if you want to specify your own types. This version does + a mapping using the handler's typemap attribute, which is set up in + __init__() to a dictionary which contains mappings for DEBUG, INFO, + WARNING, ERROR and CRITICAL. If you are using your own levels you will + either need to override this method or place a suitable dictionary in + the handler's typemap attribute. + """ + return self.typemap.get(record.levelno, self.deftype) + + def emit(self, record): + """ + Emit a record. + + Determine the message ID, event category and event type. Then + log the message in the NT event log. + """ + if self._welu: + try: + id = self.getMessageID(record) + cat = self.getEventCategory(record) + type = self.getEventType(record) + msg = self.format(record) + self._welu.ReportEvent(self.appname, id, cat, type, [msg]) + except: + self.handleError(record) + + def close(self): + """ + Clean up this handler. 
+ + You can remove the application name from the registry as a + source of event log entries. However, if you do this, you will + not be able to see the events as you intended in the Event Log + Viewer - it needs to be able to access the registry to get the + DLL name. + """ + #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype) + pass + +class HTTPHandler(logging.Handler): + """ + A class which sends records to a Web server, using either GET or + POST semantics. + """ + def __init__(self, host, url, method="GET"): + """ + Initialize the instance with the host, the request URL, and the method + ("GET" or "POST") + """ + logging.Handler.__init__(self) + method = string.upper(method) + if method not in ["GET", "POST"]: + raise ValueError, "method must be GET or POST" + self.host = host + self.url = url + self.method = method + + def mapLogRecord(self, record): + """ + Default implementation of mapping the log record into a dict + that is send as the CGI data. Overwrite in your class. + Contributed by Franz Glasner. + """ + return record.__dict__ + + def emit(self, record): + """ + Emit a record. + + Send the record to the Web server as an URL-encoded dictionary + """ + try: + import httplib, urllib + h = httplib.HTTP(self.host) + url = self.url + data = urllib.urlencode(self.mapLogRecord(record)) + if self.method == "GET": + if (string.find(url, '?') >= 0): + sep = '&' + else: + sep = '?' + url = url + "%c%s" % (sep, data) + h.putrequest(self.method, url) + if self.method == "POST": + h.putheader("Content-length", str(len(data))) + h.endheaders() + if self.method == "POST": + h.send(data) + h.getreply() #can't do anything with the result + except: + self.handleError(record) + +class BufferingHandler(logging.Handler): + """ + A handler class which buffers logging records in memory. Whenever each + record is added to the buffer, a check is made to see if the buffer should + be flushed. If it should, then flush() is expected to do what's needed. + """ + def __init__(self, capacity): + """ + Initialize the handler with the buffer size. + """ + logging.Handler.__init__(self) + self.capacity = capacity + self.buffer = [] + + def shouldFlush(self, record): + """ + Should the handler flush its buffer? + + Returns true if the buffer is up to capacity. This method can be + overridden to implement custom flushing strategies. + """ + return (len(self.buffer) >= self.capacity) + + def emit(self, record): + """ + Emit a record. + + Append the record. If shouldFlush() tells us to, call flush() to process + the buffer. + """ + self.buffer.append(record) + if self.shouldFlush(record): + self.flush() + + def flush(self): + """ + Override to implement custom flushing behaviour. + + This version just zaps the buffer to empty. + """ + self.buffer = [] + +class MemoryHandler(BufferingHandler): + """ + A handler class which buffers logging records in memory, periodically + flushing them to a target handler. Flushing occurs whenever the buffer + is full, or when an event of a certain severity or greater is seen. + """ + def __init__(self, capacity, flushLevel=logging.ERROR, target=None): + """ + Initialize the handler with the buffer size, the level at which + flushing should occur and an optional target. + + Note that without a target being set either here or via setTarget(), + a MemoryHandler is no use to anyone! 
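+
+        For example (an informal sketch):
+
+            mh = MemoryHandler(100, flushLevel=logging.ERROR,
+                               target=logging.StreamHandler())
+            # buffers up to 100 records; an ERROR (or worse) flushes all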
+ """ + BufferingHandler.__init__(self, capacity) + self.flushLevel = flushLevel + self.target = target + + def shouldFlush(self, record): + """ + Check for buffer full or a record at the flushLevel or higher. + """ + return (len(self.buffer) >= self.capacity) or \ + (record.levelno >= self.flushLevel) + + def setTarget(self, target): + """ + Set the target handler for this handler. + """ + self.target = target + + def flush(self): + """ + For a MemoryHandler, flushing means just sending the buffered + records to the target, if there is one. Override if you want + different behaviour. + """ + if self.target: + for record in self.buffer: + self.target.handle(record) + self.buffer = [] + + def close(self): + """ + Flush, set the target to None and lose the buffer. + """ + self.flush() + self.target = None + self.buffer = [] diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/planet/feedparser.py b/DJAGEN/tags/djagen_old/djagen/gezegen/planet/feedparser.py new file mode 100755 index 0000000..615ee7e --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/gezegen/planet/feedparser.py @@ -0,0 +1,2931 @@ +#!/usr/bin/env python +"""Universal feed parser + +Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds + +Visit http://feedparser.org/ for the latest version +Visit http://feedparser.org/docs/ for the latest documentation + +Required: Python 2.1 or later +Recommended: Python 2.3 or later +Recommended: CJKCodecs and iconv_codec +""" + +__version__ = "4.1"# + "$Revision: 1.92 $"[11:15] + "-cvs" +__license__ = """Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS' +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE.""" +__author__ = "Mark Pilgrim " +__contributors__ = ["Jason Diamond ", + "John Beimler ", + "Fazal Majid ", + "Aaron Swartz ", + "Kevin Marks "] +_debug = 0 + +# HTTP "User-Agent" header to send to servers when downloading feeds. +# If you are embedding feedparser in a larger application, you should +# change this to your application name and URL. +USER_AGENT = "UniversalFeedParser/%s +http://feedparser.org/" % __version__ + +# HTTP "Accept" header to send to servers when downloading feeds. If you don't +# want to send an Accept header, set this to None. 
+ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
+
+# List of preferred XML parsers, by SAX driver name.  These will be tried first,
+# but if they're not installed, Python will keep searching through its own list
+# of pre-installed parsers until it finds one that supports everything we need.
+PREFERRED_XML_PARSERS = ["drv_libxml2"]
+
+# If you want feedparser to automatically run HTML markup through HTML Tidy, set
+# this to 1.  Requires mxTidy or utidylib.
+TIDY_MARKUP = 0
+
+# List of Python interfaces for HTML Tidy, in order of preference.  Only useful
+# if TIDY_MARKUP = 1
+PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"]
+
+# ---------- required modules (should come with any Python distribution) ----------
+import sgmllib, re, sys, copy, urlparse, time, rfc822, types, cgi, urllib, urllib2
+try:
+    from cStringIO import StringIO as _StringIO
+except:
+    from StringIO import StringIO as _StringIO
+
+# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------
+
+# gzip is included with most Python distributions, but may not be available if you compiled your own
+try:
+    import gzip
+except:
+    gzip = None
+try:
+    import zlib
+except:
+    zlib = None
+
+# If a real XML parser is available, feedparser will attempt to use it.  feedparser has
+# been tested with the built-in SAX parser, PyXML, and libxml2.  On platforms where the
+# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
+# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
+try:
+    import xml.sax
+    xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
+    from xml.sax.saxutils import escape as _xmlescape
+    _XML_AVAILABLE = 1
+except:
+    _XML_AVAILABLE = 0
+    def _xmlescape(data, entities={}):
+        data = data.replace('&', '&amp;')
+        data = data.replace('>', '&gt;')
+        data = data.replace('<', '&lt;')
+        for char, entity in entities.items():
+            data = data.replace(char, entity)
+        return data
+
+# base64 support for Atom feeds that contain embedded binary data
+try:
+    import base64, binascii
+except:
+    base64 = binascii = None
+
+# cjkcodecs and iconv_codec provide support for more character encodings.
+# Both are available from http://cjkpython.i18n.org/ +try: + import cjkcodecs.aliases +except: + pass +try: + import iconv_codec +except: + pass + +# chardet library auto-detects character encodings +# Download from http://chardet.feedparser.org/ +try: + import chardet + if _debug: + import chardet.constants + chardet.constants._debug = 1 +except: + chardet = None + +# ---------- don't touch these ---------- +class ThingsNobodyCaresAboutButMe(Exception): pass +class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass +class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass +class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass +class UndeclaredNamespace(Exception): pass + +sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*') +sgmllib.special = re.compile('' % (tag, self.strattrs(attrs)), escape=0) + + # match namespaces + if tag.find(':') <> -1: + prefix, suffix = tag.split(':', 1) + else: + prefix, suffix = '', tag + prefix = self.namespacemap.get(prefix, prefix) + if prefix: + prefix = prefix + '_' + + # special hack for better tracking of empty textinput/image elements in illformed feeds + if (not prefix) and tag not in ('title', 'link', 'description', 'name'): + self.intextinput = 0 + if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'): + self.inimage = 0 + + # call special handler (if defined) or default handler + methodname = '_start_' + prefix + suffix + try: + method = getattr(self, methodname) + return method(attrsD) + except AttributeError: + return self.push(prefix + suffix, 1) + + def unknown_endtag(self, tag): + if _debug: sys.stderr.write('end %s\n' % tag) + # match namespaces + if tag.find(':') <> -1: + prefix, suffix = tag.split(':', 1) + else: + prefix, suffix = '', tag + prefix = self.namespacemap.get(prefix, prefix) + if prefix: + prefix = prefix + '_' + + # call special handler (if defined) or default handler + methodname = '_end_' + prefix + suffix + try: + method = getattr(self, methodname) + method() + except AttributeError: + self.pop(prefix + suffix) + + # track inline content + if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'): + # element declared itself as escaped markup, but it isn't really + self.contentparams['type'] = 'application/xhtml+xml' + if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml': + tag = tag.split(':')[-1] + self.handle_data('' % tag, escape=0) + + # track xml:base and xml:lang going out of scope + if self.basestack: + self.basestack.pop() + if self.basestack and self.basestack[-1]: + self.baseuri = self.basestack[-1] + if self.langstack: + self.langstack.pop() + if self.langstack: # and (self.langstack[-1] is not None): + self.lang = self.langstack[-1] + + def handle_charref(self, ref): + # called for each character reference, e.g. for ' ', ref will be '160' + if not self.elementstack: return + ref = ref.lower() + if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'): + text = '&#%s;' % ref + else: + if ref[0] == 'x': + c = int(ref[1:], 16) + else: + c = int(ref) + text = unichr(c).encode('utf-8') + self.elementstack[-1][2].append(text) + + def handle_entityref(self, ref): + # called for each entity reference, e.g. 
for '©', ref will be 'copy' + if not self.elementstack: return + if _debug: sys.stderr.write('entering handle_entityref with %s\n' % ref) + if ref in ('lt', 'gt', 'quot', 'amp', 'apos'): + text = '&%s;' % ref + else: + # entity resolution graciously donated by Aaron Swartz + def name2cp(k): + import htmlentitydefs + if hasattr(htmlentitydefs, 'name2codepoint'): # requires Python 2.3 + return htmlentitydefs.name2codepoint[k] + k = htmlentitydefs.entitydefs[k] + if k.startswith('&#') and k.endswith(';'): + return int(k[2:-1]) # not in latin-1 + return ord(k) + try: name2cp(ref) + except KeyError: text = '&%s;' % ref + else: text = unichr(name2cp(ref)).encode('utf-8') + self.elementstack[-1][2].append(text) + + def handle_data(self, text, escape=1): + # called for each block of plain text, i.e. outside of any tag and + # not containing any character or entity references + if not self.elementstack: return + if escape and self.contentparams.get('type') == 'application/xhtml+xml': + text = _xmlescape(text) + self.elementstack[-1][2].append(text) + + def handle_comment(self, text): + # called for each comment, e.g. + pass + + def handle_pi(self, text): + # called for each processing instruction, e.g. + pass + + def handle_decl(self, text): + pass + + def parse_declaration(self, i): + # override internal declaration handler to handle CDATA blocks + if _debug: sys.stderr.write('entering parse_declaration\n') + if self.rawdata[i:i+9] == '', i) + if k == -1: k = len(self.rawdata) + self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0) + return k+3 + else: + k = self.rawdata.find('>', i) + return k+1 + + def mapContentType(self, contentType): + contentType = contentType.lower() + if contentType == 'text': + contentType = 'text/plain' + elif contentType == 'html': + contentType = 'text/html' + elif contentType == 'xhtml': + contentType = 'application/xhtml+xml' + return contentType + + def trackNamespace(self, prefix, uri): + loweruri = uri.lower() + if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/') and not self.version: + self.version = 'rss090' + if loweruri == 'http://purl.org/rss/1.0/' and not self.version: + self.version = 'rss10' + if loweruri == 'http://www.w3.org/2005/atom' and not self.version: + self.version = 'atom10' + if loweruri.find('backend.userland.com/rss') <> -1: + # match any backend.userland.com namespace + uri = 'http://backend.userland.com/rss' + loweruri = uri + if self._matchnamespaces.has_key(loweruri): + self.namespacemap[prefix] = self._matchnamespaces[loweruri] + self.namespacesInUse[self._matchnamespaces[loweruri]] = uri + else: + self.namespacesInUse[prefix or ''] = uri + + def resolveURI(self, uri): + return _urljoin(self.baseuri or '', uri) + + def decodeEntities(self, element, data): + return data + + def strattrs(self, attrs): + return ''.join([' %s="%s"' % (t[0],_xmlescape(t[1],{'"':'"'})) for t in attrs]) + + def push(self, element, expectingText): + self.elementstack.append([element, expectingText, []]) + + def pop(self, element, stripWhitespace=1): + if not self.elementstack: return + if self.elementstack[-1][0] != element: return + + element, expectingText, pieces = self.elementstack.pop() + + if self.version == 'atom10' and self.contentparams.get('type','text') == 'application/xhtml+xml': + # remove enclosing child element, but only if it is a
<div> and
+            # only if all the remaining content is nested underneath it.
+            # This means that the divs would be retained in the following:
+            #    <div>foo</div><div>bar</div>
+            if pieces and (pieces[0] == '<div>' or pieces[0].startswith('<div ')) and (pieces[-1] == '</div>'):
+                depth = 0
+                for piece in pieces[:-1]:
+                    if piece.startswith('</'):
+                        depth -= 1
+                        if depth == 0: break
+                    elif piece.startswith('<') and not piece.endswith('/>'):
+                        depth += 1
+                else:
+                    pieces = pieces[1:-1]
+
+        output = ''.join(pieces)
+        if stripWhitespace:
+            output = output.strip()
+        if not expectingText: return output
+
+        # decode base64 content
+        if base64 and self.contentparams.get('base64', 0):
+            try:
+                output = base64.decodestring(output)
+            except binascii.Error:
+                pass
+            except binascii.Incomplete:
+                pass
+
+        # resolve relative URIs
+        if (element in self.can_be_relative_uri) and output:
+            output = self.resolveURI(output)
+
+        # decode entities within embedded markup
+        if not self.contentparams.get('base64', 0):
+            output = self.decodeEntities(element, output)
+
+        # remove temporary cruft from contentparams
+        try:
+            del self.contentparams['mode']
+        except KeyError:
+            pass
+        try:
+            del self.contentparams['base64']
+        except KeyError:
+            pass
+
+        # resolve relative URIs within embedded markup
+        if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types:
+            if element in self.can_contain_relative_uris:
+                output = _resolveRelativeURIs(output, self.baseuri, self.encoding)
+
+        # sanitize embedded markup
+        if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types:
+            if element in self.can_contain_dangerous_markup:
+                output = _sanitizeHTML(output, self.encoding)
+
+        if self.encoding and type(output) != type(u''):
+            try:
+                output = unicode(output, self.encoding)
+            except:
+                pass
+
+        # address common error where people take data that is already
+        # utf-8, presume that it is iso-8859-1, and re-encode it.
+        if self.encoding == 'utf-8' and type(output) == type(u''):
+            try:
+                output = unicode(output.encode('iso-8859-1'), 'utf-8')
+            except:
+                pass
+
+        # map win-1252 extensions to the proper code points
+        if type(output) == type(u''):
+            output = u''.join([c in cp1252 and cp1252[c] or c for c in output])
+
+        # categories/tags/keywords/whatever are handled in _end_category
+        if element == 'category':
+            return output
+
+        # store output in appropriate place(s)
+        if self.inentry and not self.insource:
+            if element == 'content':
+                self.entries[-1].setdefault(element, [])
+                contentparams = copy.deepcopy(self.contentparams)
+                contentparams['value'] = output
+                self.entries[-1][element].append(contentparams)
+            elif element == 'link':
+                self.entries[-1][element] = output
+                if output:
+                    self.entries[-1]['links'][-1]['href'] = output
+            else:
+                if element == 'description':
+                    element = 'summary'
+                self.entries[-1][element] = output
+                if self.incontent:
+                    contentparams = copy.deepcopy(self.contentparams)
+                    contentparams['value'] = output
+                    self.entries[-1][element + '_detail'] = contentparams
+        elif (self.infeed or self.insource) and (not self.intextinput) and (not self.inimage):
+            context = self._getContext()
+            if element == 'description':
+                element = 'subtitle'
+            context[element] = output
+            if element == 'link':
+                context['links'][-1]['href'] = output
+            elif self.incontent:
+                contentparams = copy.deepcopy(self.contentparams)
+                contentparams['value'] = output
+                context[element + '_detail'] = contentparams
+        return output
+
+    def pushContent(self, tag, attrsD, defaultContentType, expectingText):
+        self.incontent += 1
+        self.contentparams = FeedParserDict({
+            'type': self.mapContentType(attrsD.get('type', defaultContentType)),
+            'language': self.lang,
+            'base': self.baseuri})
+        self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
+        self.push(tag, expectingText)
+
+    def popContent(self, tag):
+        value = self.pop(tag)
+        self.incontent -= 1
+ self.contentparams.clear() + return value + + def _mapToStandardPrefix(self, name): + colonpos = name.find(':') + if colonpos <> -1: + prefix = name[:colonpos] + suffix = name[colonpos+1:] + prefix = self.namespacemap.get(prefix, prefix) + name = prefix + ':' + suffix + return name + + def _getAttribute(self, attrsD, name): + return attrsD.get(self._mapToStandardPrefix(name)) + + def _isBase64(self, attrsD, contentparams): + if attrsD.get('mode', '') == 'base64': + return 1 + if self.contentparams['type'].startswith('text/'): + return 0 + if self.contentparams['type'].endswith('+xml'): + return 0 + if self.contentparams['type'].endswith('/xml'): + return 0 + return 1 + + def _itsAnHrefDamnIt(self, attrsD): + href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None))) + if href: + try: + del attrsD['url'] + except KeyError: + pass + try: + del attrsD['uri'] + except KeyError: + pass + attrsD['href'] = href + return attrsD + + def _save(self, key, value): + context = self._getContext() + context.setdefault(key, value) + + def _start_rss(self, attrsD): + versionmap = {'0.91': 'rss091u', + '0.92': 'rss092', + '0.93': 'rss093', + '0.94': 'rss094'} + if not self.version: + attr_version = attrsD.get('version', '') + version = versionmap.get(attr_version) + if version: + self.version = version + elif attr_version.startswith('2.'): + self.version = 'rss20' + else: + self.version = 'rss' + + def _start_dlhottitles(self, attrsD): + self.version = 'hotrss' + + def _start_channel(self, attrsD): + self.infeed = 1 + self._cdf_common(attrsD) + _start_feedinfo = _start_channel + + def _cdf_common(self, attrsD): + if attrsD.has_key('lastmod'): + self._start_modified({}) + self.elementstack[-1][-1] = attrsD['lastmod'] + self._end_modified() + if attrsD.has_key('href'): + self._start_link({}) + self.elementstack[-1][-1] = attrsD['href'] + self._end_link() + + def _start_feed(self, attrsD): + self.infeed = 1 + versionmap = {'0.1': 'atom01', + '0.2': 'atom02', + '0.3': 'atom03'} + if not self.version: + attr_version = attrsD.get('version') + version = versionmap.get(attr_version) + if version: + self.version = version + else: + self.version = 'atom' + + def _end_channel(self): + self.infeed = 0 + _end_feed = _end_channel + + def _start_image(self, attrsD): + self.inimage = 1 + self.push('image', 0) + context = self._getContext() + context.setdefault('image', FeedParserDict()) + + def _end_image(self): + self.pop('image') + self.inimage = 0 + + def _start_textinput(self, attrsD): + self.intextinput = 1 + self.push('textinput', 0) + context = self._getContext() + context.setdefault('textinput', FeedParserDict()) + _start_textInput = _start_textinput + + def _end_textinput(self): + self.pop('textinput') + self.intextinput = 0 + _end_textInput = _end_textinput + + def _start_author(self, attrsD): + self.inauthor = 1 + self.push('author', 1) + _start_managingeditor = _start_author + _start_dc_author = _start_author + _start_dc_creator = _start_author + _start_itunes_author = _start_author + + def _end_author(self): + self.pop('author') + self.inauthor = 0 + self._sync_author_detail() + _end_managingeditor = _end_author + _end_dc_author = _end_author + _end_dc_creator = _end_author + _end_itunes_author = _end_author + + def _start_itunes_owner(self, attrsD): + self.inpublisher = 1 + self.push('publisher', 0) + + def _end_itunes_owner(self): + self.pop('publisher') + self.inpublisher = 0 + self._sync_author_detail('publisher') + + def _start_contributor(self, attrsD): + self.incontributor = 1 + context = 
self._getContext() + context.setdefault('contributors', []) + context['contributors'].append(FeedParserDict()) + self.push('contributor', 0) + + def _end_contributor(self): + self.pop('contributor') + self.incontributor = 0 + + def _start_dc_contributor(self, attrsD): + self.incontributor = 1 + context = self._getContext() + context.setdefault('contributors', []) + context['contributors'].append(FeedParserDict()) + self.push('name', 0) + + def _end_dc_contributor(self): + self._end_name() + self.incontributor = 0 + + def _start_name(self, attrsD): + self.push('name', 0) + _start_itunes_name = _start_name + + def _end_name(self): + value = self.pop('name') + if self.inpublisher: + self._save_author('name', value, 'publisher') + elif self.inauthor: + self._save_author('name', value) + elif self.incontributor: + self._save_contributor('name', value) + elif self.intextinput: + context = self._getContext() + context['textinput']['name'] = value + _end_itunes_name = _end_name + + def _start_width(self, attrsD): + self.push('width', 0) + + def _end_width(self): + value = self.pop('width') + try: + value = int(value) + except: + value = 0 + if self.inimage: + context = self._getContext() + context['image']['width'] = value + + def _start_height(self, attrsD): + self.push('height', 0) + + def _end_height(self): + value = self.pop('height') + try: + value = int(value) + except: + value = 0 + if self.inimage: + context = self._getContext() + context['image']['height'] = value + + def _start_url(self, attrsD): + self.push('href', 1) + _start_homepage = _start_url + _start_uri = _start_url + + def _end_url(self): + value = self.pop('href') + if self.inauthor: + self._save_author('href', value) + elif self.incontributor: + self._save_contributor('href', value) + elif self.inimage: + context = self._getContext() + context['image']['href'] = value + elif self.intextinput: + context = self._getContext() + context['textinput']['link'] = value + _end_homepage = _end_url + _end_uri = _end_url + + def _start_email(self, attrsD): + self.push('email', 0) + _start_itunes_email = _start_email + + def _end_email(self): + value = self.pop('email') + if self.inpublisher: + self._save_author('email', value, 'publisher') + elif self.inauthor: + self._save_author('email', value) + elif self.incontributor: + self._save_contributor('email', value) + _end_itunes_email = _end_email + + def _getContext(self): + if self.insource: + context = self.sourcedata + elif self.inentry: + context = self.entries[-1] + else: + context = self.feeddata + return context + + def _save_author(self, key, value, prefix='author'): + context = self._getContext() + context.setdefault(prefix + '_detail', FeedParserDict()) + context[prefix + '_detail'][key] = value + self._sync_author_detail() + + def _save_contributor(self, key, value): + context = self._getContext() + context.setdefault('contributors', [FeedParserDict()]) + context['contributors'][-1][key] = value + + def _sync_author_detail(self, key='author'): + context = self._getContext() + detail = context.get('%s_detail' % key) + if detail: + name = detail.get('name') + email = detail.get('email') + if name and email: + context[key] = '%s (%s)' % (name, email) + elif name: + context[key] = name + elif email: + context[key] = email + else: + author = context.get(key) + if not author: return + emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))''', author) + if not emailmatch: return + email = 
emailmatch.group(0) + # probably a better way to do the following, but it passes all the tests + author = author.replace(email, '') + author = author.replace('()', '') + author = author.strip() + if author and (author[0] == '('): + author = author[1:] + if author and (author[-1] == ')'): + author = author[:-1] + author = author.strip() + context.setdefault('%s_detail' % key, FeedParserDict()) + context['%s_detail' % key]['name'] = author + context['%s_detail' % key]['email'] = email + + def _start_subtitle(self, attrsD): + self.pushContent('subtitle', attrsD, 'text/plain', 1) + _start_tagline = _start_subtitle + _start_itunes_subtitle = _start_subtitle + + def _end_subtitle(self): + self.popContent('subtitle') + _end_tagline = _end_subtitle + _end_itunes_subtitle = _end_subtitle + + def _start_rights(self, attrsD): + self.pushContent('rights', attrsD, 'text/plain', 1) + _start_dc_rights = _start_rights + _start_copyright = _start_rights + + def _end_rights(self): + self.popContent('rights') + _end_dc_rights = _end_rights + _end_copyright = _end_rights + + def _start_item(self, attrsD): + self.entries.append(FeedParserDict()) + self.push('item', 0) + self.inentry = 1 + self.guidislink = 0 + id = self._getAttribute(attrsD, 'rdf:about') + if id: + context = self._getContext() + context['id'] = id + self._cdf_common(attrsD) + _start_entry = _start_item + _start_product = _start_item + + def _end_item(self): + self.pop('item') + self.inentry = 0 + _end_entry = _end_item + + def _start_dc_language(self, attrsD): + self.push('language', 1) + _start_language = _start_dc_language + + def _end_dc_language(self): + self.lang = self.pop('language') + _end_language = _end_dc_language + + def _start_dc_publisher(self, attrsD): + self.push('publisher', 1) + _start_webmaster = _start_dc_publisher + + def _end_dc_publisher(self): + self.pop('publisher') + self._sync_author_detail('publisher') + _end_webmaster = _end_dc_publisher + + def _start_published(self, attrsD): + self.push('published', 1) + _start_dcterms_issued = _start_published + _start_issued = _start_published + + def _end_published(self): + value = self.pop('published') + self._save('published_parsed', _parse_date(value)) + _end_dcterms_issued = _end_published + _end_issued = _end_published + + def _start_updated(self, attrsD): + self.push('updated', 1) + _start_modified = _start_updated + _start_dcterms_modified = _start_updated + _start_pubdate = _start_updated + _start_dc_date = _start_updated + + def _end_updated(self): + value = self.pop('updated') + parsed_value = _parse_date(value) + self._save('updated_parsed', parsed_value) + _end_modified = _end_updated + _end_dcterms_modified = _end_updated + _end_pubdate = _end_updated + _end_dc_date = _end_updated + + def _start_created(self, attrsD): + self.push('created', 1) + _start_dcterms_created = _start_created + + def _end_created(self): + value = self.pop('created') + self._save('created_parsed', _parse_date(value)) + _end_dcterms_created = _end_created + + def _start_expirationdate(self, attrsD): + self.push('expired', 1) + + def _end_expirationdate(self): + self._save('expired_parsed', _parse_date(self.pop('expired'))) + + def _start_cc_license(self, attrsD): + self.push('license', 1) + value = self._getAttribute(attrsD, 'rdf:resource') + if value: + self.elementstack[-1][2].append(value) + self.pop('license') + + def _start_creativecommons_license(self, attrsD): + self.push('license', 1) + + def _end_creativecommons_license(self): + self.pop('license') + + def _addTag(self, term, 
scheme, label): + context = self._getContext() + tags = context.setdefault('tags', []) + if (not term) and (not scheme) and (not label): return + value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label}) + if value not in tags: + tags.append(FeedParserDict({'term': term, 'scheme': scheme, 'label': label})) + + def _start_category(self, attrsD): + if _debug: sys.stderr.write('entering _start_category with %s\n' % repr(attrsD)) + term = attrsD.get('term') + scheme = attrsD.get('scheme', attrsD.get('domain')) + label = attrsD.get('label') + self._addTag(term, scheme, label) + self.push('category', 1) + _start_dc_subject = _start_category + _start_keywords = _start_category + + def _end_itunes_keywords(self): + for term in self.pop('itunes_keywords').split(): + self._addTag(term, 'http://www.itunes.com/', None) + + def _start_itunes_category(self, attrsD): + self._addTag(attrsD.get('text'), 'http://www.itunes.com/', None) + self.push('category', 1) + + def _end_category(self): + value = self.pop('category') + if not value: return + context = self._getContext() + tags = context['tags'] + if value and len(tags) and not tags[-1]['term']: + tags[-1]['term'] = value + else: + self._addTag(value, None, None) + _end_dc_subject = _end_category + _end_keywords = _end_category + _end_itunes_category = _end_category + + def _start_cloud(self, attrsD): + self._getContext()['cloud'] = FeedParserDict(attrsD) + + def _start_link(self, attrsD): + attrsD.setdefault('rel', 'alternate') + attrsD.setdefault('type', 'text/html') + attrsD = self._itsAnHrefDamnIt(attrsD) + if attrsD.has_key('href'): + attrsD['href'] = self.resolveURI(attrsD['href']) + expectingText = self.infeed or self.inentry or self.insource + context = self._getContext() + context.setdefault('links', []) + context['links'].append(FeedParserDict(attrsD)) + if attrsD['rel'] == 'enclosure': + self._start_enclosure(attrsD) + if attrsD.has_key('href'): + expectingText = 0 + if (attrsD.get('rel') == 'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types): + context['link'] = attrsD['href'] + else: + self.push('link', expectingText) + _start_producturl = _start_link + + def _end_link(self): + value = self.pop('link') + context = self._getContext() + if self.intextinput: + context['textinput']['link'] = value + if self.inimage: + context['image']['link'] = value + _end_producturl = _end_link + + def _start_guid(self, attrsD): + self.guidislink = (attrsD.get('ispermalink', 'true') == 'true') + self.push('id', 1) + + def _end_guid(self): + value = self.pop('id') + self._save('guidislink', self.guidislink and not self._getContext().has_key('link')) + if self.guidislink: + # guid acts as link, but only if 'ispermalink' is not present or is 'true', + # and only if the item doesn't already have a link element + self._save('link', value) + + def _start_title(self, attrsD): + self.pushContent('title', attrsD, 'text/plain', self.infeed or self.inentry or self.insource) + _start_dc_title = _start_title + _start_media_title = _start_title + + def _end_title(self): + value = self.popContent('title') + context = self._getContext() + if self.intextinput: + context['textinput']['title'] = value + elif self.inimage: + context['image']['title'] = value + _end_dc_title = _end_title + _end_media_title = _end_title + + def _start_description(self, attrsD): + context = self._getContext() + if context.has_key('summary'): + self._summaryKey = 'content' + self._start_content(attrsD) + else: + self.pushContent('description', attrsD, 
'text/html', self.infeed or self.inentry or self.insource) + + def _start_abstract(self, attrsD): + self.pushContent('description', attrsD, 'text/plain', self.infeed or self.inentry or self.insource) + + def _end_description(self): + if self._summaryKey == 'content': + self._end_content() + else: + value = self.popContent('description') + context = self._getContext() + if self.intextinput: + context['textinput']['description'] = value + elif self.inimage: + context['image']['description'] = value + self._summaryKey = None + _end_abstract = _end_description + + def _start_info(self, attrsD): + self.pushContent('info', attrsD, 'text/plain', 1) + _start_feedburner_browserfriendly = _start_info + + def _end_info(self): + self.popContent('info') + _end_feedburner_browserfriendly = _end_info + + def _start_generator(self, attrsD): + if attrsD: + attrsD = self._itsAnHrefDamnIt(attrsD) + if attrsD.has_key('href'): + attrsD['href'] = self.resolveURI(attrsD['href']) + self._getContext()['generator_detail'] = FeedParserDict(attrsD) + self.push('generator', 1) + + def _end_generator(self): + value = self.pop('generator') + context = self._getContext() + if context.has_key('generator_detail'): + context['generator_detail']['name'] = value + + def _start_admin_generatoragent(self, attrsD): + self.push('generator', 1) + value = self._getAttribute(attrsD, 'rdf:resource') + if value: + self.elementstack[-1][2].append(value) + self.pop('generator') + self._getContext()['generator_detail'] = FeedParserDict({'href': value}) + + def _start_admin_errorreportsto(self, attrsD): + self.push('errorreportsto', 1) + value = self._getAttribute(attrsD, 'rdf:resource') + if value: + self.elementstack[-1][2].append(value) + self.pop('errorreportsto') + + def _start_summary(self, attrsD): + context = self._getContext() + if context.has_key('summary'): + self._summaryKey = 'content' + self._start_content(attrsD) + else: + self._summaryKey = 'summary' + self.pushContent(self._summaryKey, attrsD, 'text/plain', 1) + _start_itunes_summary = _start_summary + + def _end_summary(self): + if self._summaryKey == 'content': + self._end_content() + else: + self.popContent(self._summaryKey or 'summary') + self._summaryKey = None + _end_itunes_summary = _end_summary + + def _start_enclosure(self, attrsD): + attrsD = self._itsAnHrefDamnIt(attrsD) + self._getContext().setdefault('enclosures', []).append(FeedParserDict(attrsD)) + href = attrsD.get('href') + if href: + context = self._getContext() + if not context.get('id'): + context['id'] = href + + def _start_source(self, attrsD): + self.insource = 1 + + def _end_source(self): + self.insource = 0 + self._getContext()['source'] = copy.deepcopy(self.sourcedata) + self.sourcedata.clear() + + def _start_content(self, attrsD): + self.pushContent('content', attrsD, 'text/plain', 1) + src = attrsD.get('src') + if src: + self.contentparams['src'] = src + self.push('content', 1) + + def _start_prodlink(self, attrsD): + self.pushContent('content', attrsD, 'text/html', 1) + + def _start_body(self, attrsD): + self.pushContent('content', attrsD, 'application/xhtml+xml', 1) + _start_xhtml_body = _start_body + + def _start_content_encoded(self, attrsD): + self.pushContent('content', attrsD, 'text/html', 1) + _start_fullitem = _start_content_encoded + + def _end_content(self): + copyToDescription = self.mapContentType(self.contentparams.get('type')) in (['text/plain'] + self.html_types) + value = self.popContent('content') + if copyToDescription: + self._save('description', value) + _end_body = 
_end_content + _end_xhtml_body = _end_content + _end_content_encoded = _end_content + _end_fullitem = _end_content + _end_prodlink = _end_content + + def _start_itunes_image(self, attrsD): + self.push('itunes_image', 0) + self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')}) + _start_itunes_link = _start_itunes_image + + def _end_itunes_block(self): + value = self.pop('itunes_block', 0) + self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0 + + def _end_itunes_explicit(self): + value = self.pop('itunes_explicit', 0) + self._getContext()['itunes_explicit'] = (value == 'yes') and 1 or 0 + +if _XML_AVAILABLE: + class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler): + def __init__(self, baseuri, baselang, encoding): + if _debug: sys.stderr.write('trying StrictFeedParser\n') + xml.sax.handler.ContentHandler.__init__(self) + _FeedParserMixin.__init__(self, baseuri, baselang, encoding) + self.bozo = 0 + self.exc = None + + def startPrefixMapping(self, prefix, uri): + self.trackNamespace(prefix, uri) + + def startElementNS(self, name, qname, attrs): + namespace, localname = name + lowernamespace = str(namespace or '').lower() + if lowernamespace.find('backend.userland.com/rss') <> -1: + # match any backend.userland.com namespace + namespace = 'http://backend.userland.com/rss' + lowernamespace = namespace + if qname and qname.find(':') > 0: + givenprefix = qname.split(':')[0] + else: + givenprefix = None + prefix = self._matchnamespaces.get(lowernamespace, givenprefix) + if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and not self.namespacesInUse.has_key(givenprefix): + raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix + if prefix: + localname = prefix + ':' + localname + localname = str(localname).lower() + if _debug: sys.stderr.write('startElementNS: qname = %s, namespace = %s, givenprefix = %s, prefix = %s, attrs = %s, localname = %s\n' % (qname, namespace, givenprefix, prefix, attrs.items(), localname)) + + # qname implementation is horribly broken in Python 2.1 (it + # doesn't report any), and slightly broken in Python 2.2 (it + # doesn't report the xml: namespace). So we match up namespaces + # with a known list first, and then possibly override them with + # the qnames the SAX parser gives us (if indeed it gives us any + # at all). Thanks to MatejC for helping me test this and + # tirelessly telling me that it didn't work yet. 
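+            # collect this element's attributes into a plain dict,
+            # lower-casing the names and re-applying any matched namespace
+            # prefix, so that unknown_starttag sees the same shape the
+            # loose (sgmllib-based) parser produces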
+ attrsD = {} + for (namespace, attrlocalname), attrvalue in attrs._attrs.items(): + lowernamespace = (namespace or '').lower() + prefix = self._matchnamespaces.get(lowernamespace, '') + if prefix: + attrlocalname = prefix + ':' + attrlocalname + attrsD[str(attrlocalname).lower()] = attrvalue + for qname in attrs.getQNames(): + attrsD[str(qname).lower()] = attrs.getValueByQName(qname) + self.unknown_starttag(localname, attrsD.items()) + + def characters(self, text): + self.handle_data(text) + + def endElementNS(self, name, qname): + namespace, localname = name + lowernamespace = str(namespace or '').lower() + if qname and qname.find(':') > 0: + givenprefix = qname.split(':')[0] + else: + givenprefix = '' + prefix = self._matchnamespaces.get(lowernamespace, givenprefix) + if prefix: + localname = prefix + ':' + localname + localname = str(localname).lower() + self.unknown_endtag(localname) + + def error(self, exc): + self.bozo = 1 + self.exc = exc + + def fatalError(self, exc): + self.error(exc) + raise exc + +class _BaseHTMLProcessor(sgmllib.SGMLParser): + elements_no_end_tag = ['area', 'base', 'basefont', 'br', 'col', 'frame', 'hr', + 'img', 'input', 'isindex', 'link', 'meta', 'param'] + + def __init__(self, encoding): + self.encoding = encoding + if _debug: sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding) + sgmllib.SGMLParser.__init__(self) + + def reset(self): + self.pieces = [] + sgmllib.SGMLParser.reset(self) + + def _shorttag_replace(self, match): + tag = match.group(1) + if tag in self.elements_no_end_tag: + return '<' + tag + ' />' + else: + return '<' + tag + '>' + + def feed(self, data): + data = re.compile(r'', self._shorttag_replace, data) # bug [ 1399464 ] Bad regexp for _shorttag_replace + data = re.sub(r'<([^<\s]+?)\s*/>', self._shorttag_replace, data) + data = data.replace(''', "'") + data = data.replace('"', '"') + if self.encoding and type(data) == type(u''): + data = data.encode(self.encoding) + sgmllib.SGMLParser.feed(self, data) + sgmllib.SGMLParser.close(self) + + def normalize_attrs(self, attrs): + # utility method to be called by descendants + attrs = [(k.lower(), v) for k, v in attrs] + attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs] + return attrs + + def unknown_starttag(self, tag, attrs): + # called for each start tag + # attrs is a list of (attr, value) tuples + # e.g. for
<pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
+        if _debug: sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag)
+        uattrs = []
+        # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
+        for key, value in attrs:
+            if type(value) != type(u''):
+                value = unicode(value, self.encoding)
+            uattrs.append((unicode(key, self.encoding), value))
+        strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs]).encode(self.encoding)
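+        # (decoding both keys and values above keeps the join from mixing
+        # str and unicode; the result is re-encoded so self.pieces stays in
+        # the original document encoding)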
+        if tag in self.elements_no_end_tag:
+            self.pieces.append('<%(tag)s%(strattrs)s />' % locals())
+        else:
+            self.pieces.append('<%(tag)s%(strattrs)s>' % locals())
+
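+    # Illustrative round trip (assumption: the input is fed through a
+    # concrete subclass such as _HTMLSanitizer below): '<img src=x.gif>'
+    # comes back out as '<img src="x.gif" />', because 'img' is listed in
+    # elements_no_end_tag and attribute values are re-quoted by
+    # unknown_starttag.
+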
+    def unknown_endtag(self, tag):
+        # called for each end tag, e.g. for </pre>
, tag will be 'pre' + # Reconstruct the original end tag. + if tag not in self.elements_no_end_tag: + self.pieces.append("" % locals()) + + def handle_charref(self, ref): + # called for each character reference, e.g. for ' ', ref will be '160' + # Reconstruct the original character reference. + self.pieces.append('&#%(ref)s;' % locals()) + + def handle_entityref(self, ref): + # called for each entity reference, e.g. for '©', ref will be 'copy' + # Reconstruct the original entity reference. + import htmlentitydefs + if not hasattr(htmlentitydefs, 'name2codepoint') or htmlentitydefs.name2codepoint.has_key(ref): + self.pieces.append('&%(ref)s;' % locals()) + else: + self.pieces.append('&%(ref)s' % locals()) + + def handle_data(self, text): + # called for each block of plain text, i.e. outside of any tag and + # not containing any character or entity references + # Store the original text verbatim. + if _debug: sys.stderr.write('_BaseHTMLProcessor, handle_text, text=%s\n' % text) + self.pieces.append(text) + + def handle_comment(self, text): + # called for each HTML comment, e.g. + # Reconstruct the original comment. + self.pieces.append('' % locals()) + + def handle_pi(self, text): + # called for each processing instruction, e.g. + # Reconstruct original processing instruction. + self.pieces.append('' % locals()) + + def handle_decl(self, text): + # called for the DOCTYPE, if present, e.g. + # + # Reconstruct original DOCTYPE + self.pieces.append('' % locals()) + + _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match + def _scan_name(self, i, declstartpos): + rawdata = self.rawdata + n = len(rawdata) + if i == n: + return None, -1 + m = self._new_declname_match(rawdata, i) + if m: + s = m.group() + name = s.strip() + if (i + len(s)) == n: + return None, -1 # end of buffer + return name.lower(), m.end() + else: + self.handle_data(rawdata) +# self.updatepos(declstartpos, i) + return None, -1 + + def output(self): + '''Return processed HTML as a single string''' + return ''.join([str(p) for p in self.pieces]) + +class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor): + def __init__(self, baseuri, baselang, encoding): + sgmllib.SGMLParser.__init__(self) + _FeedParserMixin.__init__(self, baseuri, baselang, encoding) + + def decodeEntities(self, element, data): + data = data.replace('<', '<') + data = data.replace('<', '<') + data = data.replace('<', '<') + data = data.replace('>', '>') + data = data.replace('>', '>') + data = data.replace('>', '>') + data = data.replace('&', '&') + data = data.replace('&', '&') + data = data.replace('"', '"') + data = data.replace('"', '"') + data = data.replace(''', ''') + data = data.replace(''', ''') + if self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'): + data = data.replace('<', '<') + data = data.replace('>', '>') + data = data.replace('&', '&') + data = data.replace('"', '"') + data = data.replace(''', "'") + return data + + def strattrs(self, attrs): + return ''.join([' %s="%s"' % t for t in attrs]) + +class _RelativeURIResolver(_BaseHTMLProcessor): + relative_uris = [('a', 'href'), + ('applet', 'codebase'), + ('area', 'href'), + ('blockquote', 'cite'), + ('body', 'background'), + ('del', 'cite'), + ('form', 'action'), + ('frame', 'longdesc'), + ('frame', 'src'), + ('iframe', 'longdesc'), + ('iframe', 'src'), + ('head', 'profile'), + ('img', 'longdesc'), + ('img', 'src'), + ('img', 'usemap'), + ('input', 'src'), + ('input', 'usemap'), + ('ins', 'cite'), + ('link', 'href'), + ('object', 
'classid'), + ('object', 'codebase'), + ('object', 'data'), + ('object', 'usemap'), + ('q', 'cite'), + ('script', 'src')] + + def __init__(self, baseuri, encoding): + _BaseHTMLProcessor.__init__(self, encoding) + self.baseuri = baseuri + + def resolveURI(self, uri): + return _urljoin(self.baseuri, uri) + + def unknown_starttag(self, tag, attrs): + attrs = self.normalize_attrs(attrs) + attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs] + _BaseHTMLProcessor.unknown_starttag(self, tag, attrs) + +def _resolveRelativeURIs(htmlSource, baseURI, encoding): + if _debug: sys.stderr.write('entering _resolveRelativeURIs\n') + p = _RelativeURIResolver(baseURI, encoding) + p.feed(htmlSource) + return p.output() + +class _HTMLSanitizer(_BaseHTMLProcessor): + acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'b', 'big', + 'blockquote', 'br', 'button', 'caption', 'center', 'cite', 'code', 'col', + 'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt', 'em', 'fieldset', + 'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', + 'ins', 'kbd', 'label', 'legend', 'li', 'map', 'menu', 'ol', 'optgroup', + 'option', 'p', 'pre', 'q', 's', 'samp', 'select', 'small', 'span', 'strike', + 'strong', 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'tfoot', 'th', + 'thead', 'tr', 'tt', 'u', 'ul', 'var'] + + acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey', + 'action', 'align', 'alt', 'axis', 'border', 'cellpadding', 'cellspacing', + 'char', 'charoff', 'charset', 'checked', 'cite', 'class', 'clear', 'cols', + 'colspan', 'color', 'compact', 'coords', 'datetime', 'dir', 'disabled', + 'enctype', 'for', 'frame', 'headers', 'height', 'href', 'hreflang', 'hspace', + 'id', 'ismap', 'label', 'lang', 'longdesc', 'maxlength', 'media', 'method', + 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'prompt', 'readonly', + 'rel', 'rev', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', + 'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title', 'type', + 'usemap', 'valign', 'value', 'vspace', 'width', 'xml:lang'] + + unacceptable_elements_with_end_tag = ['script', 'applet'] + + def reset(self): + _BaseHTMLProcessor.reset(self) + self.unacceptablestack = 0 + + def unknown_starttag(self, tag, attrs): + if not tag in self.acceptable_elements: + if tag in self.unacceptable_elements_with_end_tag: + self.unacceptablestack += 1 + return + attrs = self.normalize_attrs(attrs) + attrs = [(key, value) for key, value in attrs if key in self.acceptable_attributes] + _BaseHTMLProcessor.unknown_starttag(self, tag, attrs) + + def unknown_endtag(self, tag): + if not tag in self.acceptable_elements: + if tag in self.unacceptable_elements_with_end_tag: + self.unacceptablestack -= 1 + return + _BaseHTMLProcessor.unknown_endtag(self, tag) + + def handle_pi(self, text): + pass + + def handle_decl(self, text): + pass + + def handle_data(self, text): + if not self.unacceptablestack: + _BaseHTMLProcessor.handle_data(self, text) + +def _sanitizeHTML(htmlSource, encoding): + p = _HTMLSanitizer(encoding) + p.feed(htmlSource) + data = p.output() + if TIDY_MARKUP: + # loop through list of preferred Tidy interfaces looking for one that's installed, + # then set up a common _tidy function to wrap the interface-specific API. 
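+        # (each branch below binds _tidy to a callable with the signature
+        # _tidy(data, **kwargs) -> str)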
+ _tidy = None + for tidy_interface in PREFERRED_TIDY_INTERFACES: + try: + if tidy_interface == "uTidy": + from tidy import parseString as _utidy + def _tidy(data, **kwargs): + return str(_utidy(data, **kwargs)) + break + elif tidy_interface == "mxTidy": + from mx.Tidy import Tidy as _mxtidy + def _tidy(data, **kwargs): + nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs) + return data + break + except: + pass + if _tidy: + utf8 = type(data) == type(u'') + if utf8: + data = data.encode('utf-8') + data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8") + if utf8: + data = unicode(data, 'utf-8') + if data.count(''): + data = data.split('>', 1)[1] + if data.count('= '2.3.3' + assert base64 != None + user, passw = base64.decodestring(req.headers['Authorization'].split(' ')[1]).split(':') + realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0] + self.add_password(realm, host, user, passw) + retry = self.http_error_auth_reqed('www-authenticate', host, req, headers) + self.reset_retry_count() + return retry + except: + return self.http_error_default(req, fp, code, msg, headers) + +def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers): + """URL, filename, or string --> stream + + This function lets you define parsers that take any input source + (URL, pathname to local or network file, or actual data as a string) + and deal with it in a uniform manner. Returned object is guaranteed + to have all the basic stdio read methods (read, readline, readlines). + Just .close() the object when you're done with it. + + If the etag argument is supplied, it will be used as the value of an + If-None-Match request header. + + If the modified argument is supplied, it must be a tuple of 9 integers + as returned by gmtime() in the standard Python time module. This MUST + be in GMT (Greenwich Mean Time). The formatted date/time will be used + as the value of an If-Modified-Since request header. + + If the agent argument is supplied, it will be used as the value of a + User-Agent request header. + + If the referrer argument is supplied, it will be used as the value of a + Referer[sic] request header. + + If handlers is supplied, it is a list of handlers used to build a + urllib2 opener. + """ + + if hasattr(url_file_stream_or_string, 'read'): + return url_file_stream_or_string + + if url_file_stream_or_string == '-': + return sys.stdin + + if urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp'): + if not agent: + agent = USER_AGENT + # test for inline user:password for basic auth + auth = None + if base64: + urltype, rest = urllib.splittype(url_file_stream_or_string) + realhost, rest = urllib.splithost(rest) + if realhost: + user_passwd, realhost = urllib.splituser(realhost) + if user_passwd: + url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest) + auth = base64.encodestring(user_passwd).strip() + # try to open with urllib2 (to use optional headers) + request = urllib2.Request(url_file_stream_or_string) + request.add_header('User-Agent', agent) + if etag: + request.add_header('If-None-Match', etag) + if modified: + # format into an RFC 1123-compliant timestamp. We can't use + # time.strftime() since the %a and %b directives can be affected + # by the current locale, but RFC 2616 states that dates must be + # in English. 
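+            # e.g. (1994, 11, 6, 8, 49, 37, 6, 310, 0) -> 'Sun, 06 Nov 1994 08:49:37 GMT'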
+ short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] + months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] + request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5])) + if referrer: + request.add_header('Referer', referrer) + if gzip and zlib: + request.add_header('Accept-encoding', 'gzip, deflate') + elif gzip: + request.add_header('Accept-encoding', 'gzip') + elif zlib: + request.add_header('Accept-encoding', 'deflate') + else: + request.add_header('Accept-encoding', '') + if auth: + request.add_header('Authorization', 'Basic %s' % auth) + if ACCEPT_HEADER: + request.add_header('Accept', ACCEPT_HEADER) + request.add_header('A-IM', 'feed') # RFC 3229 support + opener = apply(urllib2.build_opener, tuple([_FeedURLHandler()] + handlers)) + opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent + try: + return opener.open(request) + finally: + opener.close() # JohnD + + # try to open with native open function (if url_file_stream_or_string is a filename) + try: + return open(url_file_stream_or_string) + except: + pass + + # treat url_file_stream_or_string as string + return _StringIO(str(url_file_stream_or_string)) + +_date_handlers = [] +def registerDateHandler(func): + '''Register a date handler function (takes string, returns 9-tuple date in GMT)''' + _date_handlers.insert(0, func) + +# ISO-8601 date parsing routines written by Fazal Majid. +# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601 +# parser is beyond the scope of feedparser and would be a worthwhile addition +# to the Python library. +# A single regular expression cannot parse ISO 8601 date formats into groups +# as the standard is highly irregular (for instance is 030104 2003-01-04 or +# 0301-04-01), so we use templates instead. +# Please note the order in templates is significant because we need a +# greedy match. +_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-MM', 'YYYY-?OOO', + 'YY-?MM-?DD', 'YY-?OOO', 'YYYY', + '-YY-?MM', '-OOO', '-YY', + '--MM-?DD', '--MM', + '---DD', + 'CC', ''] +_iso8601_re = [ + tmpl.replace( + 'YYYY', r'(?P\d{4})').replace( + 'YY', r'(?P\d\d)').replace( + 'MM', r'(?P[01]\d)').replace( + 'DD', r'(?P[0123]\d)').replace( + 'OOO', r'(?P[0123]\d\d)').replace( + 'CC', r'(?P\d\d$)') + + r'(T?(?P\d{2}):(?P\d{2})' + + r'(:(?P\d{2}))?' + + r'(?P[+-](?P\d{2})(:(?P\d{2}))?|Z)?)?' + for tmpl in _iso8601_tmpl] +del tmpl +_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re] +del regex +def _parse_date_iso8601(dateString): + '''Parse a variety of ISO-8601-compatible formats like 20040105''' + m = None + for _iso8601_match in _iso8601_matches: + m = _iso8601_match(dateString) + if m: break + if not m: return + if m.span() == (0, 0): return + params = m.groupdict() + ordinal = params.get('ordinal', 0) + if ordinal: + ordinal = int(ordinal) + else: + ordinal = 0 + year = params.get('year', '--') + if not year or year == '--': + year = time.gmtime()[0] + elif len(year) == 2: + # ISO 8601 assumes current century, i.e. 
93 -> 2093, NOT 1993 + year = 100 * int(time.gmtime()[0] / 100) + int(year) + else: + year = int(year) + month = params.get('month', '-') + if not month or month == '-': + # ordinals are NOT normalized by mktime, we simulate them + # by setting month=1, day=ordinal + if ordinal: + month = 1 + else: + month = time.gmtime()[1] + month = int(month) + day = params.get('day', 0) + if not day: + # see above + if ordinal: + day = ordinal + elif params.get('century', 0) or \ + params.get('year', 0) or params.get('month', 0): + day = 1 + else: + day = time.gmtime()[2] + else: + day = int(day) + # special case of the century - is the first year of the 21st century + # 2000 or 2001 ? The debate goes on... + if 'century' in params.keys(): + year = (int(params['century']) - 1) * 100 + 1 + # in ISO 8601 most fields are optional + for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']: + if not params.get(field, None): + params[field] = 0 + hour = int(params.get('hour', 0)) + minute = int(params.get('minute', 0)) + second = int(params.get('second', 0)) + # weekday is normalized by mktime(), we can ignore it + weekday = 0 + # daylight savings is complex, but not needed for feedparser's purposes + # as time zones, if specified, include mention of whether it is active + # (e.g. PST vs. PDT, CET). Using -1 is implementation-dependent and + # and most implementations have DST bugs + daylight_savings_flag = 0 + tm = [year, month, day, hour, minute, second, weekday, + ordinal, daylight_savings_flag] + # ISO 8601 time zone adjustments + tz = params.get('tz') + if tz and tz != 'Z': + if tz[0] == '-': + tm[3] += int(params.get('tzhour', 0)) + tm[4] += int(params.get('tzmin', 0)) + elif tz[0] == '+': + tm[3] -= int(params.get('tzhour', 0)) + tm[4] -= int(params.get('tzmin', 0)) + else: + return None + # Python's time.mktime() is a wrapper around the ANSI C mktime(3c) + # which is guaranteed to normalize d/m/y/h/m/s. + # Many implementations have bugs, but we'll pretend they don't. + return time.localtime(time.mktime(tm)) +registerDateHandler(_parse_date_iso8601) + +# 8-bit date handling routines written by ytrewq1. 
+_korean_year = u'\ub144' # b3e2 in euc-kr +_korean_month = u'\uc6d4' # bff9 in euc-kr +_korean_day = u'\uc77c' # c0cf in euc-kr +_korean_am = u'\uc624\uc804' # bfc0 c0fc in euc-kr +_korean_pm = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr + +_korean_onblog_date_re = \ + re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \ + (_korean_year, _korean_month, _korean_day)) +_korean_nate_date_re = \ + re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \ + (_korean_am, _korean_pm)) +def _parse_date_onblog(dateString): + '''Parse a string according to the OnBlog 8-bit date format''' + m = _korean_onblog_date_re.match(dateString) + if not m: return + w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \ + {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\ + 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\ + 'zonediff': '+09:00'} + if _debug: sys.stderr.write('OnBlog date parsed as: %s\n' % w3dtfdate) + return _parse_date_w3dtf(w3dtfdate) +registerDateHandler(_parse_date_onblog) + +def _parse_date_nate(dateString): + '''Parse a string according to the Nate 8-bit date format''' + m = _korean_nate_date_re.match(dateString) + if not m: return + hour = int(m.group(5)) + ampm = m.group(4) + if (ampm == _korean_pm): + hour += 12 + hour = str(hour) + if len(hour) == 1: + hour = '0' + hour + w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \ + {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\ + 'hour': hour, 'minute': m.group(6), 'second': m.group(7),\ + 'zonediff': '+09:00'} + if _debug: sys.stderr.write('Nate date parsed as: %s\n' % w3dtfdate) + return _parse_date_w3dtf(w3dtfdate) +registerDateHandler(_parse_date_nate) + +_mssql_date_re = \ + re.compile('(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})(\.\d+)?') +def _parse_date_mssql(dateString): + '''Parse a string according to the MS SQL date format''' + m = _mssql_date_re.match(dateString) + if not m: return + w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \ + {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\ + 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\ + 'zonediff': '+09:00'} + if _debug: sys.stderr.write('MS SQL date parsed as: %s\n' % w3dtfdate) + return _parse_date_w3dtf(w3dtfdate) +registerDateHandler(_parse_date_mssql) + +# Unicode strings for Greek date strings +_greek_months = \ + { \ + u'\u0399\u03b1\u03bd': u'Jan', # c9e1ed in iso-8859-7 + u'\u03a6\u03b5\u03b2': u'Feb', # d6e5e2 in iso-8859-7 + u'\u039c\u03ac\u03ce': u'Mar', # ccdcfe in iso-8859-7 + u'\u039c\u03b1\u03ce': u'Mar', # cce1fe in iso-8859-7 + u'\u0391\u03c0\u03c1': u'Apr', # c1f0f1 in iso-8859-7 + u'\u039c\u03ac\u03b9': u'May', # ccdce9 in iso-8859-7 + u'\u039c\u03b1\u03ca': u'May', # cce1fa in iso-8859-7 + u'\u039c\u03b1\u03b9': u'May', # cce1e9 in iso-8859-7 + u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7 + u'\u0399\u03bf\u03bd': u'Jun', # c9efed in iso-8859-7 + u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7 + u'\u0399\u03bf\u03bb': u'Jul', # c9f9eb in iso-8859-7 + u'\u0391\u03cd\u03b3': u'Aug', # c1fde3 in iso-8859-7 + u'\u0391\u03c5\u03b3': u'Aug', # c1f5e3 in iso-8859-7 + u'\u03a3\u03b5\u03c0': u'Sep', # d3e5f0 in iso-8859-7 + u'\u039f\u03ba\u03c4': u'Oct', # cfeaf4 in iso-8859-7 + u'\u039d\u03bf\u03ad': u'Nov', # cdefdd in iso-8859-7 + u'\u039d\u03bf\u03b5': u'Nov', # cdefe5 in iso-8859-7 + u'\u0394\u03b5\u03ba': 
u'Dec', # c4e5ea in iso-8859-7 + } + +_greek_wdays = \ + { \ + u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7 + u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7 + u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7 + u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7 + u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7 + u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7 + u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7 + } + +_greek_date_format_re = \ + re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)') + +def _parse_date_greek(dateString): + '''Parse a string according to a Greek 8-bit date format.''' + m = _greek_date_format_re.match(dateString) + if not m: return + try: + wday = _greek_wdays[m.group(1)] + month = _greek_months[m.group(3)] + except: + return + rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \ + {'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\ + 'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\ + 'zonediff': m.group(8)} + if _debug: sys.stderr.write('Greek date parsed as: %s\n' % rfc822date) + return _parse_date_rfc822(rfc822date) +registerDateHandler(_parse_date_greek) + +# Unicode strings for Hungarian date strings +_hungarian_months = \ + { \ + u'janu\u00e1r': u'01', # e1 in iso-8859-2 + u'febru\u00e1ri': u'02', # e1 in iso-8859-2 + u'm\u00e1rcius': u'03', # e1 in iso-8859-2 + u'\u00e1prilis': u'04', # e1 in iso-8859-2 + u'm\u00e1ujus': u'05', # e1 in iso-8859-2 + u'j\u00fanius': u'06', # fa in iso-8859-2 + u'j\u00falius': u'07', # fa in iso-8859-2 + u'augusztus': u'08', + u'szeptember': u'09', + u'okt\u00f3ber': u'10', # f3 in iso-8859-2 + u'november': u'11', + u'december': u'12', + } + +_hungarian_date_format_re = \ + re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))') + +def _parse_date_hungarian(dateString): + '''Parse a string according to a Hungarian 8-bit date format.''' + m = _hungarian_date_format_re.match(dateString) + if not m: return + try: + month = _hungarian_months[m.group(2)] + day = m.group(3) + if len(day) == 1: + day = '0' + day + hour = m.group(4) + if len(hour) == 1: + hour = '0' + hour + except: + return + w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \ + {'year': m.group(1), 'month': month, 'day': day,\ + 'hour': hour, 'minute': m.group(5),\ + 'zonediff': m.group(6)} + if _debug: sys.stderr.write('Hungarian date parsed as: %s\n' % w3dtfdate) + return _parse_date_w3dtf(w3dtfdate) +registerDateHandler(_parse_date_hungarian) + +# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by +# Drake and licensed under the Python license. 
Removed all range checking
+# for month, day, hour, minute, and second, since mktime will normalize
+# these later
+def _parse_date_w3dtf(dateString):
+    def __extract_date(m):
+        year = int(m.group('year'))
+        if year < 100:
+            year = 100 * int(time.gmtime()[0] / 100) + int(year)
+        if year < 1000:
+            return 0, 0, 0
+        julian = m.group('julian')
+        if julian:
+            julian = int(julian)
+            month = julian / 30 + 1
+            day = julian % 30 + 1
+            jday = None
+            while jday != julian:
+                t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
+                jday = time.gmtime(t)[-2]
+                diff = abs(jday - julian)
+                if jday > julian:
+                    if diff < day:
+                        day = day - diff
+                    else:
+                        month = month - 1
+                        day = 31
+                elif jday < julian:
+                    if day + diff < 28:
+                        day = day + diff
+                    else:
+                        month = month + 1
+            return year, month, day
+        month = m.group('month')
+        day = 1
+        if month is None:
+            month = 1
+        else:
+            month = int(month)
+            day = m.group('day')
+            if day:
+                day = int(day)
+            else:
+                day = 1
+        return year, month, day
+
+    def __extract_time(m):
+        if not m:
+            return 0, 0, 0
+        hours = m.group('hours')
+        if not hours:
+            return 0, 0, 0
+        hours = int(hours)
+        minutes = int(m.group('minutes'))
+        seconds = m.group('seconds')
+        if seconds:
+            seconds = int(seconds)
+        else:
+            seconds = 0
+        return hours, minutes, seconds
+
+    def __extract_tzd(m):
+        '''Return the Time Zone Designator as an offset in seconds from UTC.'''
+        if not m:
+            return 0
+        tzd = m.group('tzd')
+        if not tzd:
+            return 0
+        if tzd == 'Z':
+            return 0
+        hours = int(m.group('tzdhours'))
+        minutes = m.group('tzdminutes')
+        if minutes:
+            minutes = int(minutes)
+        else:
+            minutes = 0
+        offset = (hours*60 + minutes) * 60
+        if tzd[0] == '+':
+            return -offset
+        return offset
+
+    __date_re = ('(?P<year>\d\d\d\d)'
+                 '(?:(?P<dsep>-|)'
+                 '(?:(?P<julian>\d\d\d)'
+                 '|(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?))?')
+    __tzd_re = '(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)'
+    __tzd_rx = re.compile(__tzd_re)
+    __time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
+                 '(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?'
+                 + __tzd_re)
+    __datetime_re = '%s(?:T%s)?' % (__date_re, __time_re)
+    __datetime_rx = re.compile(__datetime_re)
+    m = __datetime_rx.match(dateString)
+    if (m is None) or (m.group() != dateString): return
+    gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
+    if gmt[0] == 0: return
+    return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone)
+registerDateHandler(_parse_date_w3dtf)
+
+def _parse_date_rfc822(dateString):
+    '''Parse an RFC822, RFC1123, RFC2822, or asctime-style date'''
+    data = dateString.split()
+    if data[0][-1] in (',', '.') or data[0].lower() in rfc822._daynames:
+        del data[0]
+    if len(data) == 4:
+        s = data[3]
+        i = s.find('+')
+        if i > 0:
+            data[3:] = [s[:i], s[i+1:]]
+        else:
+            data.append('')
+        dateString = " ".join(data)
+    if len(data) < 5:
+        dateString += ' 00:00:00 GMT'
+    tm = rfc822.parsedate_tz(dateString)
+    if tm:
+        return time.gmtime(rfc822.mktime_tz(tm))
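+
+# A hedged, doctest-style sketch (not part of the original module) of what
+# _parse_date_w3dtf above returns: a 9-tuple normalized to UTC, so a '-05:00'
+# designator shifts the wall-clock time forward by five hours. It assumes
+# this file is importable as 'feedparser':
+#
+#     >>> import feedparser
+#     >>> feedparser._parse_date_w3dtf('2003-12-31T10:14:55Z')[:6]
+#     (2003, 12, 31, 10, 14, 55)
+#     >>> feedparser._parse_date_w3dtf('2003-12-31T10:14:55-05:00')[:6]
+#     (2003, 12, 31, 15, 14, 55)
+
+# rfc822.py defines several time zones, but we define some extra ones.
+# 'ET' is equivalent to 'EST', etc.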
+_additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800} +rfc822._timezones.update(_additional_timezones) +registerDateHandler(_parse_date_rfc822) + +def _parse_date(dateString): + '''Parses a variety of date formats into a 9-tuple in GMT''' + for handler in _date_handlers: + try: + date9tuple = handler(dateString) + if not date9tuple: continue + if len(date9tuple) != 9: + if _debug: sys.stderr.write('date handler function must return 9-tuple\n') + raise ValueError + map(int, date9tuple) + return date9tuple + except Exception, e: + if _debug: sys.stderr.write('%s raised %s\n' % (handler.__name__, repr(e))) + pass + return None + +def _getCharacterEncoding(http_headers, xml_data): + '''Get the character encoding of the XML document + + http_headers is a dictionary + xml_data is a raw string (not Unicode) + + This is so much trickier than it sounds, it's not even funny. + According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type + is application/xml, application/*+xml, + application/xml-external-parsed-entity, or application/xml-dtd, + the encoding given in the charset parameter of the HTTP Content-Type + takes precedence over the encoding given in the XML prefix within the + document, and defaults to 'utf-8' if neither are specified. But, if + the HTTP Content-Type is text/xml, text/*+xml, or + text/xml-external-parsed-entity, the encoding given in the XML prefix + within the document is ALWAYS IGNORED and only the encoding given in + the charset parameter of the HTTP Content-Type header should be + respected, and it defaults to 'us-ascii' if not specified. + + Furthermore, discussion on the atom-syntax mailing list with the + author of RFC 3023 leads me to the conclusion that any document + served with a Content-Type of text/* and no charset parameter + must be treated as us-ascii. (We now do this.) And also that it + must always be flagged as non-well-formed. (We now do this too.) + + If Content-Type is unspecified (input was local file or non-HTTP source) + or unrecognized (server just got it totally wrong), then go by the + encoding given in the XML prefix of the document and default to + 'iso-8859-1' as per the HTTP specification (RFC 2616). + + Then, assuming we didn't find a character encoding in the HTTP headers + (and the HTTP Content-type allowed us to look in the body), we need + to sniff the first few bytes of the XML data and try to determine + whether the encoding is ASCII-compatible. Section F of the XML + specification shows the way here: + http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info + + If the sniffed encoding is not ASCII-compatible, we need to make it + ASCII compatible so that we can sniff further into the XML declaration + to find the encoding attribute, which will tell us the true encoding. + + Of course, none of this guarantees that we will be able to parse the + feed in the declared character encoding (assuming it was declared + correctly, which many are not). CJKCodecs and iconv_codec help a lot; + you should definitely install them if you can. 
+ http://cjkpython.i18n.org/ + ''' + + def _parseHTTPContentType(content_type): + '''takes HTTP Content-Type header and returns (content type, charset) + + If no charset is specified, returns (content type, '') + If no content type is specified, returns ('', '') + Both return parameters are guaranteed to be lowercase strings + ''' + content_type = content_type or '' + content_type, params = cgi.parse_header(content_type) + return content_type, params.get('charset', '').replace("'", '') + + sniffed_xml_encoding = '' + xml_encoding = '' + true_encoding = '' + http_content_type, http_encoding = _parseHTTPContentType(http_headers.get('content-type')) + # Must sniff for non-ASCII-compatible character encodings before + # searching for XML declaration. This heuristic is defined in + # section F of the XML specification: + # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info + try: + if xml_data[:4] == '\x4c\x6f\xa7\x94': + # EBCDIC + xml_data = _ebcdic_to_ascii(xml_data) + elif xml_data[:4] == '\x00\x3c\x00\x3f': + # UTF-16BE + sniffed_xml_encoding = 'utf-16be' + xml_data = unicode(xml_data, 'utf-16be').encode('utf-8') + elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') and (xml_data[2:4] != '\x00\x00'): + # UTF-16BE with BOM + sniffed_xml_encoding = 'utf-16be' + xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8') + elif xml_data[:4] == '\x3c\x00\x3f\x00': + # UTF-16LE + sniffed_xml_encoding = 'utf-16le' + xml_data = unicode(xml_data, 'utf-16le').encode('utf-8') + elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and (xml_data[2:4] != '\x00\x00'): + # UTF-16LE with BOM + sniffed_xml_encoding = 'utf-16le' + xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8') + elif xml_data[:4] == '\x00\x00\x00\x3c': + # UTF-32BE + sniffed_xml_encoding = 'utf-32be' + xml_data = unicode(xml_data, 'utf-32be').encode('utf-8') + elif xml_data[:4] == '\x3c\x00\x00\x00': + # UTF-32LE + sniffed_xml_encoding = 'utf-32le' + xml_data = unicode(xml_data, 'utf-32le').encode('utf-8') + elif xml_data[:4] == '\x00\x00\xfe\xff': + # UTF-32BE with BOM + sniffed_xml_encoding = 'utf-32be' + xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8') + elif xml_data[:4] == '\xff\xfe\x00\x00': + # UTF-32LE with BOM + sniffed_xml_encoding = 'utf-32le' + xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8') + elif xml_data[:3] == '\xef\xbb\xbf': + # UTF-8 with BOM + sniffed_xml_encoding = 'utf-8' + xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8') + else: + # ASCII-compatible + pass + xml_encoding_match = re.compile('^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data) + except: + xml_encoding_match = None + if xml_encoding_match: + xml_encoding = xml_encoding_match.groups()[0].lower() + if sniffed_xml_encoding and (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')): + xml_encoding = sniffed_xml_encoding + acceptable_content_type = 0 + application_content_types = ('application/xml', 'application/xml-dtd', 'application/xml-external-parsed-entity') + text_content_types = ('text/xml', 'text/xml-external-parsed-entity') + if (http_content_type in application_content_types) or \ + (http_content_type.startswith('application/') and http_content_type.endswith('+xml')): + acceptable_content_type = 1 + true_encoding = http_encoding or xml_encoding or 'utf-8' + elif (http_content_type in text_content_types) or \ + (http_content_type.startswith('text/')) and 
http_content_type.endswith('+xml'):
+        acceptable_content_type = 1
+        true_encoding = http_encoding or 'us-ascii'
+    elif http_content_type.startswith('text/'):
+        true_encoding = http_encoding or 'us-ascii'
+    elif http_headers and (not http_headers.has_key('content-type')):
+        true_encoding = xml_encoding or 'iso-8859-1'
+    else:
+        true_encoding = xml_encoding or 'utf-8'
+    return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type
+
+def _toUTF8(data, encoding):
+    '''Changes an XML data stream on the fly to specify a new encoding
+
+    data is a raw sequence of bytes (not Unicode) that is presumed to be in %encoding already
+    encoding is a string recognized by encodings.aliases
+    '''
+    if _debug: sys.stderr.write('entering _toUTF8, trying encoding %s\n' % encoding)
+    # strip Byte Order Mark (if present)
+    if (len(data) >= 4) and (data[:2] == '\xfe\xff') and (data[2:4] != '\x00\x00'):
+        if _debug:
+            sys.stderr.write('stripping BOM\n')
+            if encoding != 'utf-16be':
+                sys.stderr.write('trying utf-16be instead\n')
+        encoding = 'utf-16be'
+        data = data[2:]
+    elif (len(data) >= 4) and (data[:2] == '\xff\xfe') and (data[2:4] != '\x00\x00'):
+        if _debug:
+            sys.stderr.write('stripping BOM\n')
+            if encoding != 'utf-16le':
+                sys.stderr.write('trying utf-16le instead\n')
+        encoding = 'utf-16le'
+        data = data[2:]
+    elif data[:3] == '\xef\xbb\xbf':
+        if _debug:
+            sys.stderr.write('stripping BOM\n')
+            if encoding != 'utf-8':
+                sys.stderr.write('trying utf-8 instead\n')
+        encoding = 'utf-8'
+        data = data[3:]
+    elif data[:4] == '\x00\x00\xfe\xff':
+        if _debug:
+            sys.stderr.write('stripping BOM\n')
+            if encoding != 'utf-32be':
+                sys.stderr.write('trying utf-32be instead\n')
+        encoding = 'utf-32be'
+        data = data[4:]
+    elif data[:4] == '\xff\xfe\x00\x00':
+        if _debug:
+            sys.stderr.write('stripping BOM\n')
+            if encoding != 'utf-32le':
+                sys.stderr.write('trying utf-32le instead\n')
+        encoding = 'utf-32le'
+        data = data[4:]
+    newdata = unicode(data, encoding)
+    if _debug: sys.stderr.write('successfully converted %s data to unicode\n' % encoding)
+    declmatch = re.compile('^<\?xml[^>]*?>')
+    newdecl = '''<?xml version='1.0' encoding='utf-8'?>'''
+    if declmatch.search(newdata):
+        newdata = declmatch.sub(newdecl, newdata)
+    else:
+        newdata = newdecl + u'\n' + newdata
+    return newdata.encode('utf-8')
+
+def _stripDoctype(data):
+    '''Strips DOCTYPE from XML document, returns (rss_version, stripped_data)
+
+    rss_version may be 'rss091n' or None
+    stripped_data is the same XML document, minus the DOCTYPE
+    '''
+    entity_pattern = re.compile(r'<!ENTITY([^>]*?)>', re.MULTILINE)
+    data = entity_pattern.sub('', data)
+    doctype_pattern = re.compile(r'<!DOCTYPE([^>]*?)>', re.MULTILINE)
+    doctype_results = doctype_pattern.findall(data)
+    doctype = doctype_results and doctype_results[0] or ''
+    if doctype.lower().count('netscape'):
+        version = 'rss091n'
+    else:
+        version = None
+    data = doctype_pattern.sub('', data)
+    return version, data
+
+def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=[]):
+    '''Parse a feed from a URL, file, stream, or string'''
+    result = FeedParserDict()
+    result['feed'] = FeedParserDict()
+    result['entries'] = []
+    if _XML_AVAILABLE:
+        result['bozo'] = 0
+    if type(handlers) == types.InstanceType:
+        handlers = [handlers]
+    try:
+        f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers)
+        data = f.read()
+    except Exception, e:
+        result['bozo'] = 1
+        result['bozo_exception'] = e
+        data = ''
+        f = None
+
+    # if feed is gzip-compressed, decompress it
+    if f
and data and hasattr(f, 'headers'):
+        if gzip and f.headers.get('content-encoding', '') == 'gzip':
+            try:
+                data = gzip.GzipFile(fileobj=_StringIO(data)).read()
+            except Exception, e:
+                # Some feeds claim to be gzipped but they're not, so
+                # we get garbage.  Ideally, we should re-request the
+                # feed without the 'Accept-encoding: gzip' header,
+                # but we don't.
+                result['bozo'] = 1
+                result['bozo_exception'] = e
+                data = ''
+        elif zlib and f.headers.get('content-encoding', '') == 'deflate':
+            try:
+                data = zlib.decompress(data, -zlib.MAX_WBITS)
+            except Exception, e:
+                result['bozo'] = 1
+                result['bozo_exception'] = e
+                data = ''
+
+    # save HTTP headers
+    if hasattr(f, 'info'):
+        info = f.info()
+        result['etag'] = info.getheader('ETag')
+        last_modified = info.getheader('Last-Modified')
+        if last_modified:
+            result['modified'] = _parse_date(last_modified)
+    if hasattr(f, 'url'):
+        result['href'] = f.url
+        result['status'] = 200
+    if hasattr(f, 'status'):
+        result['status'] = f.status
+    if hasattr(f, 'headers'):
+        result['headers'] = f.headers.dict
+    if hasattr(f, 'close'):
+        f.close()
+
+    # there are four encodings to keep track of:
+    # - http_encoding is the encoding declared in the Content-Type HTTP header
+    # - xml_encoding is the encoding declared in the <?xml declaration; changed
+# project name
+#2.5 - 7/25/2003 - MAP - changed to Python license (all contributors agree);
+# removed unnecessary urllib code -- urllib2 should always be available anyway;
+# return actual url, status, and full HTTP headers (as result['url'],
+# result['status'], and result['headers']) if parsing a remote feed over HTTP --
+# this should pass all the HTTP tests at <http://diveintomark.org/tests/client/http/>;
+# added the latest namespace-of-the-week for RSS 2.0
+#2.5.1 - 7/26/2003 - RMK - clear opener.addheaders so we only send our custom
+# User-Agent (otherwise urllib2 sends two, which confuses some servers)
+#2.5.2 - 7/28/2003 - MAP - entity-decode inline xml properly; added support for
+# inline <xhtml:body> and <xhtml:div> as used in some RSS 2.0 feeds
+#2.5.3 - 8/6/2003 - TvdV - patch to track whether we're inside an image or
+# textInput, and also to return the character encoding (if specified)
+#2.6 - 1/1/2004 - MAP - dc:author support (MarekK); fixed bug tracking
+# nested divs within content (JohnD); fixed missing sys import (JohanS);
+# fixed regular expression to capture XML character encoding (Andrei);
+# added support for Atom 0.3-style links; fixed bug with textInput tracking;
+# added support for cloud (MartijnP); added support for multiple
+# category/dc:subject (MartijnP); normalize content model: 'description' gets
+# description (which can come from description, summary, or full content if no
+# description), 'content' gets dict of base/language/type/value (which can come
+# from content:encoded, xhtml:body, content, or fullitem);
+# fixed bug matching arbitrary Userland namespaces; added xml:base and xml:lang
+# tracking; fixed bug tracking unknown tags; fixed bug tracking content when
+# element is not in default namespace (like Pocketsoap feed);
+# resolve relative URLs in link, guid, docs, url, comments, wfw:comment,
+# wfw:commentRSS; resolve relative URLs within embedded HTML markup in
+# description, xhtml:body, content, content:encoded, title, subtitle,
+# summary, info, tagline, and copyright; added support for pingback and
+# trackback namespaces
+#2.7 - 1/5/2004 - MAP - really added support for trackback and pingback
+# namespaces, as opposed to 2.6 when I said I did but didn't really;
+# sanitize HTML markup within some elements; added mxTidy support (if
installed) to tidy HTML markup within some elements; fixed indentation
+# bug in _parse_date (FazalM); use socket.setdefaulttimeout if available
+# (FazalM); universal date parsing and normalization (FazalM): 'created', 'modified',
+# 'issued' are parsed into 9-tuple date format and stored in 'created_parsed',
+# 'modified_parsed', and 'issued_parsed'; 'date' is duplicated in 'modified'
+# and vice-versa; 'date_parsed' is duplicated in 'modified_parsed' and vice-versa
+#2.7.1 - 1/9/2004 - MAP - fixed bug handling &quot; and &apos;. fixed memory
+# leak not closing url opener (JohnD); added dc:publisher support (MarekK);
+# added admin:errorReportsTo support (MarekK); Python 2.1 dict support (MarekK)
+#2.7.4 - 1/14/2004 - MAP - added workaround for improperly formed <br/>
tags in +# encoded HTML (skadz); fixed unicode handling in normalize_attrs (ChrisL); +# fixed relative URI processing for guid (skadz); added ICBM support; added +# base64 support +#2.7.5 - 1/15/2004 - MAP - added workaround for malformed DOCTYPE (seen on many +# blogspot.com sites); added _debug variable +#2.7.6 - 1/16/2004 - MAP - fixed bug with StringIO importing +#3.0b3 - 1/23/2004 - MAP - parse entire feed with real XML parser (if available); +# added several new supported namespaces; fixed bug tracking naked markup in +# description; added support for enclosure; added support for source; re-added +# support for cloud which got dropped somehow; added support for expirationDate +#3.0b4 - 1/26/2004 - MAP - fixed xml:lang inheritance; fixed multiple bugs tracking +# xml:base URI, one for documents that don't define one explicitly and one for +# documents that define an outer and an inner xml:base that goes out of scope +# before the end of the document +#3.0b5 - 1/26/2004 - MAP - fixed bug parsing multiple links at feed level +#3.0b6 - 1/27/2004 - MAP - added feed type and version detection, result['version'] +# will be one of SUPPORTED_VERSIONS.keys() or empty string if unrecognized; +# added support for creativeCommons:license and cc:license; added support for +# full Atom content model in title, tagline, info, copyright, summary; fixed bug +# with gzip encoding (not always telling server we support it when we do) +#3.0b7 - 1/28/2004 - MAP - support Atom-style author element in author_detail +# (dictionary of 'name', 'url', 'email'); map author to author_detail if author +# contains name + email address +#3.0b8 - 1/28/2004 - MAP - added support for contributor +#3.0b9 - 1/29/2004 - MAP - fixed check for presence of dict function; added +# support for summary +#3.0b10 - 1/31/2004 - MAP - incorporated ISO-8601 date parsing routines from +# xml.util.iso8601 +#3.0b11 - 2/2/2004 - MAP - added 'rights' to list of elements that can contain +# dangerous markup; fiddled with decodeEntities (not right); liberalized +# date parsing even further +#3.0b12 - 2/6/2004 - MAP - fiddled with decodeEntities (still not right); +# added support to Atom 0.2 subtitle; added support for Atom content model +# in copyright; better sanitizing of dangerous HTML elements with end tags +# (script, frameset) +#3.0b13 - 2/8/2004 - MAP - better handling of empty HTML tags (br, hr, img, +# etc.) in embedded markup, in either HTML or XHTML form (
<br>, <br/>, <br />
) +#3.0b14 - 2/8/2004 - MAP - fixed CDATA handling in non-wellformed feeds under +# Python 2.1 +#3.0b15 - 2/11/2004 - MAP - fixed bug resolving relative links in wfw:commentRSS; +# fixed bug capturing author and contributor URL; fixed bug resolving relative +# links in author and contributor URL; fixed bug resolvin relative links in +# generator URL; added support for recognizing RSS 1.0; passed Simon Fell's +# namespace tests, and included them permanently in the test suite with his +# permission; fixed namespace handling under Python 2.1 +#3.0b16 - 2/12/2004 - MAP - fixed support for RSS 0.90 (broken in b15) +#3.0b17 - 2/13/2004 - MAP - determine character encoding as per RFC 3023 +#3.0b18 - 2/17/2004 - MAP - always map description to summary_detail (Andrei); +# use libxml2 (if available) +#3.0b19 - 3/15/2004 - MAP - fixed bug exploding author information when author +# name was in parentheses; removed ultra-problematic mxTidy support; patch to +# workaround crash in PyXML/expat when encountering invalid entities +# (MarkMoraes); support for textinput/textInput +#3.0b20 - 4/7/2004 - MAP - added CDF support +#3.0b21 - 4/14/2004 - MAP - added Hot RSS support +#3.0b22 - 4/19/2004 - MAP - changed 'channel' to 'feed', 'item' to 'entries' in +# results dict; changed results dict to allow getting values with results.key +# as well as results[key]; work around embedded illformed HTML with half +# a DOCTYPE; work around malformed Content-Type header; if character encoding +# is wrong, try several common ones before falling back to regexes (if this +# works, bozo_exception is set to CharacterEncodingOverride); fixed character +# encoding issues in BaseHTMLProcessor by tracking encoding and converting +# from Unicode to raw strings before feeding data to sgmllib.SGMLParser; +# convert each value in results to Unicode (if possible), even if using +# regex-based parsing +#3.0b23 - 4/21/2004 - MAP - fixed UnicodeDecodeError for feeds that contain +# high-bit characters in attributes in embedded HTML in description (thanks +# Thijs van de Vossen); moved guid, date, and date_parsed to mapped keys in +# FeedParserDict; tweaked FeedParserDict.has_key to return True if asking +# about a mapped key +#3.0fc1 - 4/23/2004 - MAP - made results.entries[0].links[0] and +# results.entries[0].enclosures[0] into FeedParserDict; fixed typo that could +# cause the same encoding to be tried twice (even if it failed the first time); +# fixed DOCTYPE stripping when DOCTYPE contained entity declarations; +# better textinput and image tracking in illformed RSS 1.0 feeds +#3.0fc2 - 5/10/2004 - MAP - added and passed Sam's amp tests; added and passed +# my blink tag tests +#3.0fc3 - 6/18/2004 - MAP - fixed bug in _changeEncodingDeclaration that +# failed to parse utf-16 encoded feeds; made source into a FeedParserDict; +# duplicate admin:generatorAgent/@rdf:resource in generator_detail.url; +# added support for image; refactored parse() fallback logic to try other +# encodings if SAX parsing fails (previously it would only try other encodings +# if re-encoding failed); remove unichr madness in normalize_attrs now that +# we're properly tracking encoding in and out of BaseHTMLProcessor; set +# feed.language from root-level xml:lang; set entry.id from rdf:about; +# send Accept header +#3.0 - 6/21/2004 - MAP - don't try iso-8859-1 (can't distinguish between +# iso-8859-1 and windows-1252 anyway, and most incorrectly marked feeds are +# windows-1252); fixed regression that could cause the same encoding to be +# tried twice (even 
if it failed the first time) +#3.0.1 - 6/22/2004 - MAP - default to us-ascii for all text/* content types; +# recover from malformed content-type header parameter with no equals sign +# ('text/xml; charset:iso-8859-1') +#3.1 - 6/28/2004 - MAP - added and passed tests for converting HTML entities +# to Unicode equivalents in illformed feeds (aaronsw); added and +# passed tests for converting character entities to Unicode equivalents +# in illformed feeds (aaronsw); test for valid parsers when setting +# XML_AVAILABLE; make version and encoding available when server returns +# a 304; add handlers parameter to pass arbitrary urllib2 handlers (like +# digest auth or proxy support); add code to parse username/password +# out of url and send as basic authentication; expose downloading-related +# exceptions in bozo_exception (aaronsw); added __contains__ method to +# FeedParserDict (aaronsw); added publisher_detail (aaronsw) +#3.2 - 7/3/2004 - MAP - use cjkcodecs and iconv_codec if available; always +# convert feed to UTF-8 before passing to XML parser; completely revamped +# logic for determining character encoding and attempting XML parsing +# (much faster); increased default timeout to 20 seconds; test for presence +# of Location header on redirects; added tests for many alternate character +# encodings; support various EBCDIC encodings; support UTF-16BE and +# UTF16-LE with or without a BOM; support UTF-8 with a BOM; support +# UTF-32BE and UTF-32LE with or without a BOM; fixed crashing bug if no +# XML parsers are available; added support for 'Content-encoding: deflate'; +# send blank 'Accept-encoding: ' header if neither gzip nor zlib modules +# are available +#3.3 - 7/15/2004 - MAP - optimize EBCDIC to ASCII conversion; fix obscure +# problem tracking xml:base and xml:lang if element declares it, child +# doesn't, first grandchild redeclares it, and second grandchild doesn't; +# refactored date parsing; defined public registerDateHandler so callers +# can add support for additional date formats at runtime; added support +# for OnBlog, Nate, MSSQL, Greek, and Hungarian dates (ytrewq1); added +# zopeCompatibilityHack() which turns FeedParserDict into a regular +# dictionary, required for Zope compatibility, and also makes command- +# line debugging easier because pprint module formats real dictionaries +# better than dictionary-like objects; added NonXMLContentType exception, +# which is stored in bozo_exception when a feed is served with a non-XML +# media type such as 'text/plain'; respect Content-Language as default +# language if not xml:lang is present; cloud dict is now FeedParserDict; +# generator dict is now FeedParserDict; better tracking of xml:lang, +# including support for xml:lang='' to unset the current language; +# recognize RSS 1.0 feeds even when RSS 1.0 namespace is not the default +# namespace; don't overwrite final status on redirects (scenarios: +# redirecting to a URL that returns 304, redirecting to a URL that +# redirects to another URL with a different type of redirect); add +# support for HTTP 303 redirects +#4.0 - MAP - support for relative URIs in xml:base attribute; fixed +# encoding issue with mxTidy (phopkins); preliminary support for RFC 3229; +# support for Atom 1.0; support for iTunes extensions; new 'tags' for +# categories/keywords/etc. 
as array of dict +# {'term': term, 'scheme': scheme, 'label': label} to match Atom 1.0 +# terminology; parse RFC 822-style dates with no time; lots of other +# bug fixes +#4.1 - MAP - removed socket timeout; added support for chardet library diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/planet/htmltmpl.py b/DJAGEN/tags/djagen_old/djagen/gezegen/planet/htmltmpl.py new file mode 100755 index 0000000..be6e41b --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/gezegen/planet/htmltmpl.py @@ -0,0 +1,1480 @@ + +""" A templating engine for separation of code and HTML. + + The documentation of this templating engine is separated to two parts: + + 1. Description of the templating language. + + 2. Documentation of classes and API of this module that provides + a Python implementation of the templating language. + + All the documentation can be found in 'doc' directory of the + distribution tarball or at the homepage of the engine. + Latest versions of this module are also available at that website. + + You can use and redistribute this module under conditions of the + GNU General Public License that can be found either at + [ http://www.gnu.org/ ] or in file "LICENSE" contained in the + distribution tarball of this module. + + Copyright (c) 2001 Tomas Styblo, tripie@cpan.org + + @name htmltmpl + @version 1.22 + @author-name Tomas Styblo + @author-email tripie@cpan.org + @website http://htmltmpl.sourceforge.net/ + @license-name GNU GPL + @license-url http://www.gnu.org/licenses/gpl.html +""" + +__version__ = 1.22 +__author__ = "Tomas Styblo (tripie@cpan.org)" + +# All imported modules are part of the standard Python library. + +from types import * +import re +import os +import os.path +import pprint # only for debugging +import sys +import copy +import cgi # for HTML escaping of variables +import urllib # for URL escaping of variables +import cPickle # for template compilation +import gettext + +INCLUDE_DIR = "inc" + +# Total number of possible parameters. +# Increment if adding a parameter to any statement. +PARAMS_NUMBER = 3 + +# Relative positions of parameters in TemplateCompiler.tokenize(). +PARAM_NAME = 1 +PARAM_ESCAPE = 2 +PARAM_GLOBAL = 3 +PARAM_GETTEXT_STRING = 1 + +# Find a way to lock files. Currently implemented only for UNIX and windows. +LOCKTYPE_FCNTL = 1 +LOCKTYPE_MSVCRT = 2 +LOCKTYPE = None +try: + import fcntl +except: + try: + import msvcrt + except: + LOCKTYPE = None + else: + LOCKTYPE = LOCKTYPE_MSVCRT +else: + LOCKTYPE = LOCKTYPE_FCNTL +LOCK_EX = 1 +LOCK_SH = 2 +LOCK_UN = 3 + +############################################## +# CLASS: TemplateManager # +############################################## + +class TemplateManager: + """ Class that manages compilation and precompilation of templates. + + You should use this class whenever you work with templates + that are stored in a file. The class can create a compiled + template and transparently manage its precompilation. It also + keeps the precompiled templates up-to-date by modification times + comparisons. + """ + + def __init__(self, include=1, max_include=5, precompile=1, comments=1, + gettext=0, debug=0): + """ Constructor. + + @header + __init__(include=1, max_include=5, precompile=1, comments=1, + gettext=0, debug=0) + + @param include Enable or disable included templates. + This optional parameter can be used to enable or disable + TMPL_INCLUDE inclusion of templates. Disabling of + inclusion can improve performance a bit. The inclusion is + enabled by default. + + @param max_include Maximum depth of nested inclusions. 
+ This optional parameter can be used to specify maximum depth of + nested TMPL_INCLUDE inclusions. It defaults to 5. + This setting prevents infinite recursive inclusions. + + @param precompile Enable or disable precompilation of templates. + This optional parameter can be used to enable or disable + creation and usage of precompiled templates. + + A precompiled template is saved to the same directory in + which the main template file is located. You need write + permissions to that directory. + + Precompilation provides a significant performance boost because + it's not necessary to parse the templates over and over again. + The boost is especially noticeable when templates that include + other templates are used. + + Comparison of modification times of the main template and all + included templates is used to ensure that the precompiled + templates are up-to-date. Templates are also recompiled if the + htmltmpl module is updated. + + The TemplateErrorexception is raised when the precompiled + template cannot be saved. Precompilation is enabled by default. + + Precompilation is available only on UNIX and Windows platforms, + because proper file locking which is necessary to ensure + multitask safe behaviour is platform specific and is not + implemented for other platforms. Attempts to enable precompilation + on the other platforms result in raise of the + TemplateError exception. + + @param comments Enable or disable template comments. + This optional parameter can be used to enable or disable + template comments. + Disabling of the comments can improve performance a bit. + Comments are enabled by default. + + @param gettext Enable or disable gettext support. + + @param debug Enable or disable debugging messages. + This optional parameter is a flag that can be used to enable + or disable debugging messages which are printed to the standard + error output. The debugging messages are disabled by default. + """ + # Save the optional parameters. + # These values are not modified by any method. + self._include = include + self._max_include = max_include + self._precompile = precompile + self._comments = comments + self._gettext = gettext + self._debug = debug + + # Find what module to use to lock files. + # File locking is necessary for the 'precompile' feature to be + # multitask/thread safe. Currently it works only on UNIX + # and Windows. Anyone willing to implement it on Mac ? + if precompile and not LOCKTYPE: + raise TemplateError, "Template precompilation is not "\ + "available on this platform." + self.DEB("INIT DONE") + + def prepare(self, file): + """ Preprocess, parse, tokenize and compile the template. + + If precompilation is enabled then this method tries to load + a precompiled form of the template from the same directory + in which the template source file is located. If it succeeds, + then it compares modification times stored in the precompiled + form to modification times of source files of the template, + including source files of all templates included via the + TMPL_INCLUDE statements. If any of the modification times + differs, then the template is recompiled and the precompiled + form updated. + + If precompilation is disabled, then this method parses and + compiles the template. + + @header prepare(file) + + @return Compiled template. + The methods returns an instance of the Template class + which is a compiled form of the template. This instance can be + used as input for the TemplateProcessor. + + @param file Path to the template file to prepare. 
+ The method looks for the template file in current directory + if the parameter is a relative path. All included templates must + be placed in subdirectory 'inc' of the + directory in which the main template file is located. + """ + compiled = None + if self._precompile: + if self.is_precompiled(file): + try: + precompiled = self.load_precompiled(file) + except PrecompiledError, template: + print >> sys.stderr, "Htmltmpl: bad precompiled "\ + "template '%s' removed" % template + compiled = self.compile(file) + self.save_precompiled(compiled) + else: + precompiled.debug(self._debug) + compile_params = (self._include, self._max_include, + self._comments, self._gettext) + if precompiled.is_uptodate(compile_params): + self.DEB("PRECOMPILED: UPTODATE") + compiled = precompiled + else: + self.DEB("PRECOMPILED: NOT UPTODATE") + compiled = self.update(precompiled) + else: + self.DEB("PRECOMPILED: NOT PRECOMPILED") + compiled = self.compile(file) + self.save_precompiled(compiled) + else: + self.DEB("PRECOMPILATION DISABLED") + compiled = self.compile(file) + return compiled + + def update(self, template): + """ Update (recompile) a compiled template. + + This method recompiles a template compiled from a file. + If precompilation is enabled then the precompiled form saved on + disk is also updated. + + @header update(template) + + @return Recompiled template. + It's ensured that the returned template is up-to-date. + + @param template A compiled template. + This parameter should be an instance of the Template + class, created either by the TemplateManager or by the + TemplateCompiler. The instance must represent a template + compiled from a file on disk. + """ + self.DEB("UPDATE") + updated = self.compile(template.file()) + if self._precompile: + self.save_precompiled(updated) + return updated + + ############################################## + # PRIVATE METHODS # + ############################################## + + def DEB(self, str): + """ Print debugging message to stderr if debugging is enabled. + @hidden + """ + if self._debug: print >> sys.stderr, str + + def lock_file(self, file, lock): + """ Provide platform independent file locking. + @hidden + """ + fd = file.fileno() + if LOCKTYPE == LOCKTYPE_FCNTL: + if lock == LOCK_SH: + fcntl.flock(fd, fcntl.LOCK_SH) + elif lock == LOCK_EX: + fcntl.flock(fd, fcntl.LOCK_EX) + elif lock == LOCK_UN: + fcntl.flock(fd, fcntl.LOCK_UN) + else: + raise TemplateError, "BUG: bad lock in lock_file" + elif LOCKTYPE == LOCKTYPE_MSVCRT: + if lock == LOCK_SH: + # msvcrt does not support shared locks :-( + msvcrt.locking(fd, msvcrt.LK_LOCK, 1) + elif lock == LOCK_EX: + msvcrt.locking(fd, msvcrt.LK_LOCK, 1) + elif lock == LOCK_UN: + msvcrt.locking(fd, msvcrt.LK_UNLCK, 1) + else: + raise TemplateError, "BUG: bad lock in lock_file" + else: + raise TemplateError, "BUG: bad locktype in lock_file" + + def compile(self, file): + """ Compile the template. + @hidden + """ + return TemplateCompiler(self._include, self._max_include, + self._comments, self._gettext, + self._debug).compile(file) + + def is_precompiled(self, file): + """ Return true if the template is already precompiled on the disk. + This method doesn't check whether the compiled template is + uptodate. + @hidden + """ + filename = file + "c" # "template.tmplc" + if os.path.isfile(filename): + return 1 + else: + return 0 + + def load_precompiled(self, file): + """ Load precompiled template from disk. + + Remove the precompiled template file and recompile it + if the file contains corrupted or unpicklable data. 
+ + @hidden + """ + filename = file + "c" # "template.tmplc" + self.DEB("LOADING PRECOMPILED") + try: + remove_bad = 0 + file = None + try: + file = open(filename, "rb") + self.lock_file(file, LOCK_SH) + precompiled = cPickle.load(file) + except IOError, (errno, errstr): + raise TemplateError, "IO error in load precompiled "\ + "template '%s': (%d) %s"\ + % (filename, errno, errstr) + except cPickle.UnpicklingError: + remove_bad = 1 + raise PrecompiledError, filename + except: + remove_bad = 1 + raise + else: + return precompiled + finally: + if file: + self.lock_file(file, LOCK_UN) + file.close() + if remove_bad and os.path.isfile(filename): + # X: We may lose the original exception here, raising OSError. + os.remove(filename) + + def save_precompiled(self, template): + """ Save compiled template to disk in precompiled form. + + Associated metadata is also saved. It includes: filename of the + main template file, modification time of the main template file, + modification times of all included templates and version of the + htmltmpl module which compiled the template. + + The method removes a file which is saved only partially because + of some error. + + @hidden + """ + filename = template.file() + "c" # creates "template.tmplc" + # Check if we have write permission to the template's directory. + template_dir = os.path.dirname(os.path.abspath(filename)) + if not os.access(template_dir, os.W_OK): + raise TemplateError, "Cannot save precompiled templates "\ + "to '%s': write permission denied."\ + % template_dir + try: + remove_bad = 0 + file = None + try: + file = open(filename, "wb") # may truncate existing file + self.lock_file(file, LOCK_EX) + BINARY = 1 + READABLE = 0 + if self._debug: + cPickle.dump(template, file, READABLE) + else: + cPickle.dump(template, file, BINARY) + except IOError, (errno, errstr): + remove_bad = 1 + raise TemplateError, "IO error while saving precompiled "\ + "template '%s': (%d) %s"\ + % (filename, errno, errstr) + except cPickle.PicklingError, error: + remove_bad = 1 + raise TemplateError, "Pickling error while saving "\ + "precompiled template '%s': %s"\ + % (filename, error) + except: + remove_bad = 1 + raise + else: + self.DEB("SAVING PRECOMPILED") + finally: + if file: + self.lock_file(file, LOCK_UN) + file.close() + if remove_bad and os.path.isfile(filename): + # X: We may lose the original exception here, raising OSError. + os.remove(filename) + + +############################################## +# CLASS: TemplateProcessor # +############################################## + +class TemplateProcessor: + """ Fill the template with data and process it. + + This class provides actual processing of a compiled template. + Use it to set template variables and loops and then obtain + result of the processing. + """ + + def __init__(self, html_escape=1, magic_vars=1, global_vars=0, debug=0): + """ Constructor. + + @header __init__(html_escape=1, magic_vars=1, global_vars=0, + debug=0) + + @param html_escape Enable or disable HTML escaping of variables. + This optional parameter is a flag that can be used to enable or + disable automatic HTML escaping of variables. + All variables are by default automatically HTML escaped. + The escaping process substitutes HTML brackets, ampersands and + double quotes with appropriate HTML entities. + + @param magic_vars Enable or disable loop magic variables. + This parameter can be used to enable or disable + "magic" context variables, that are automatically defined inside + loops. Magic variables are enabled by default. 
+ + Refer to the language specification for description of these + magic variables. + + @param global_vars Globally activate global lookup of variables. + This optional parameter is a flag that can be used to specify + whether variables which cannot be found in the current scope + should be automatically looked up in enclosing scopes. + + Automatic global lookup is disabled by default. Global lookup + can be overriden on a per-variable basis by the + GLOBAL parameter of a TMPL_VAR + statement. + + @param debug Enable or disable debugging messages. + """ + self._html_escape = html_escape + self._magic_vars = magic_vars + self._global_vars = global_vars + self._debug = debug + + # Data structure containing variables and loops set by the + # application. Use debug=1, process some template and + # then check stderr to see how the structure looks. + # It's modified only by set() and reset() methods. + self._vars = {} + + # Following variables are for multipart templates. + self._current_part = 1 + self._current_pos = 0 + + def set(self, var, value): + """ Associate a value with top-level template variable or loop. + + A template identifier can represent either an ordinary variable + (string) or a loop. + + To assign a value to a string identifier pass a scalar + as the 'value' parameter. This scalar will be automatically + converted to string. + + To assign a value to a loop identifier pass a list of mappings as + the 'value' parameter. The engine iterates over this list and + assigns values from the mappings to variables in a template loop + block if a key in the mapping corresponds to a name of a variable + in the loop block. The number of mappings contained in this list + is equal to number of times the loop block is repeated in the + output. + + @header set(var, value) + @return No return value. + + @param var Name of template variable or loop. + @param value The value to associate. + + """ + # The correctness of character case is verified only for top-level + # variables. + if self.is_ordinary_var(value): + # template top-level ordinary variable + if not var.islower(): + raise TemplateError, "Invalid variable name '%s'." % var + elif type(value) == ListType: + # template top-level loop + if var != var.capitalize(): + raise TemplateError, "Invalid loop name '%s'." % var + else: + raise TemplateError, "Value of toplevel variable '%s' must "\ + "be either a scalar or a list." % var + self._vars[var] = value + self.DEB("VALUE SET: " + str(var)) + + def reset(self, keep_data=0): + """ Reset the template data. + + This method resets the data contained in the template processor + instance. The template processor instance can be used to process + any number of templates, but this method must be called after + a template is processed to reuse the instance, + + @header reset(keep_data=0) + @return No return value. + + @param keep_data Do not reset the template data. + Use this flag if you do not want the template data to be erased. + This way you can reuse the data contained in the instance of + the TemplateProcessor. + """ + self._current_part = 1 + self._current_pos = 0 + if not keep_data: + self._vars.clear() + self.DEB("RESET") + + def process(self, template, part=None): + """ Process a compiled template. Return the result as string. + + This method actually processes a template and returns + the result. + + @header process(template, part=None) + @return Result of the processing as string. + + @param template A compiled template. 
+ Value of this parameter must be an instance of the + Template class created either by the + TemplateManager or by the TemplateCompiler. + + @param part The part of a multipart template to process. + This parameter can be used only together with a multipart + template. It specifies the number of the part to process. + It must be greater than zero, because the parts are numbered + from one. + + The parts must be processed in the right order. You + cannot process a part which precedes an already processed part. + + If this parameter is not specified, then the whole template + is processed, or all remaining parts are processed. + """ + self.DEB("APP INPUT:") + if self._debug: pprint.pprint(self._vars, sys.stderr) + if part != None and (part == 0 or part < self._current_part): + raise TemplateError, "process() - invalid part number" + + # This flag means "jump behind the end of current statement" or + # "skip the parameters of current statement". + # Even parameters that actually are not present in the template + # do appear in the list of tokens as empty items ! + skip_params = 0 + + # Stack for enabling or disabling output in response to TMPL_IF, + # TMPL_UNLESS, TMPL_ELSE and TMPL_LOOPs with no passes. + output_control = [] + ENABLE_OUTPUT = 1 + DISABLE_OUTPUT = 0 + + # Stacks for data related to loops. + loop_name = [] # name of a loop + loop_pass = [] # current pass of a loop (counted from zero) + loop_start = [] # index of loop start in token list + loop_total = [] # total number of passes in a loop + + tokens = template.tokens() + len_tokens = len(tokens) + out = "" # buffer for processed output + + # Recover position at which we ended after processing of last part. + i = self._current_pos + + # Process the list of tokens. + while 1: + if i == len_tokens: break + if skip_params: + # Skip the parameters following a statement. + skip_params = 0 + i += PARAMS_NUMBER + continue + + token = tokens[i] + if token.startswith("." + escape = tokens[i + PARAM_ESCAPE] + globalp = tokens[i + PARAM_GLOBAL] + skip_params = 1 + + # If output of current block is not disabled then append + # the substitued and escaped variable to the output. + if DISABLE_OUTPUT not in output_control: + value = str(self.find_value(var, loop_name, loop_pass, + loop_total, globalp)) + out += self.escape(value, escape) + self.DEB("VAR: " + str(var)) + + elif token == "." + skip_params = 1 + + # Find total number of passes in this loop. + passtotal = self.find_value(var, loop_name, loop_pass, + loop_total) + if not passtotal: passtotal = 0 + # Push data for this loop on the stack. + loop_total.append(passtotal) + loop_start.append(i) + loop_pass.append(0) + loop_name.append(var) + + # Disable output of loop block if the number of passes + # in this loop is zero. + if passtotal == 0: + # This loop is empty. + output_control.append(DISABLE_OUTPUT) + self.DEB("LOOP: DISABLE: " + str(var)) + else: + output_control.append(ENABLE_OUTPUT) + self.DEB("LOOP: FIRST PASS: %s TOTAL: %d"\ + % (var, passtotal)) + + elif token == "." + globalp = tokens[i + PARAM_GLOBAL] + skip_params = 1 + if self.find_value(var, loop_name, loop_pass, + loop_total, globalp): + output_control.append(ENABLE_OUTPUT) + self.DEB("IF: ENABLE: " + str(var)) + else: + output_control.append(DISABLE_OUTPUT) + self.DEB("IF: DISABLE: " + str(var)) + + elif token == "." 
+ globalp = tokens[i + PARAM_GLOBAL] + skip_params = 1 + if self.find_value(var, loop_name, loop_pass, + loop_total, globalp): + output_control.append(DISABLE_OUTPUT) + self.DEB("UNLESS: DISABLE: " + str(var)) + else: + output_control.append(ENABLE_OUTPUT) + self.DEB("UNLESS: ENABLE: " + str(var)) + + elif token == "." + + # If this loop was not disabled, then record the pass. + if loop_total[-1] > 0: loop_pass[-1] += 1 + + if loop_pass[-1] == loop_total[-1]: + # There are no more passes in this loop. Pop + # the loop from stack. + loop_pass.pop() + loop_name.pop() + loop_start.pop() + loop_total.pop() + output_control.pop() + self.DEB("LOOP: END") + else: + # Jump to the beggining of this loop block + # to process next pass of the loop. + i = loop_start[-1] + self.DEB("LOOP: NEXT PASS") + + elif token == "." + output_control.pop() + self.DEB("IF: END") + + elif token == "." + output_control.pop() + self.DEB("UNLESS: END") + + elif token == "." + if output_control[-1] == DISABLE_OUTPUT: + # Condition was false, activate the ELSE block. + output_control[-1] = ENABLE_OUTPUT + self.DEB("ELSE: ENABLE") + elif output_control[-1] == ENABLE_OUTPUT: + # Condition was true, deactivate the ELSE block. + output_control[-1] = DISABLE_OUTPUT + self.DEB("ELSE: DISABLE") + else: + raise TemplateError, "BUG: ELSE: INVALID FLAG" + + elif token == " +

+ HTMLTMPL WARNING:
+ Cannot include template: %s +

+
+ """ % filename + self.DEB("CANNOT INCLUDE WARNING") + + elif token == "." % token + + elif DISABLE_OUTPUT not in output_control: + # Raw textual template data. + # If output of current block is not disabled, then + # append template data to the output buffer. + out += token + + i += 1 + # end of the big while loop + + # Check whether all opening statements were closed. + if loop_name: raise TemplateError, "Missing ." + if output_control: raise TemplateError, "Missing or " + return out + + ############################################## + # PRIVATE METHODS # + ############################################## + + def DEB(self, str): + """ Print debugging message to stderr if debugging is enabled. + @hidden + """ + if self._debug: print >> sys.stderr, str + + def find_value(self, var, loop_name, loop_pass, loop_total, + global_override=None): + """ Search the self._vars data structure to find variable var + located in currently processed pass of a loop which + is currently being processed. If the variable is an ordinary + variable, then return it. + + If the variable is an identificator of a loop, then + return the total number of times this loop will + be executed. + + Return an empty string, if the variable is not + found at all. + + @hidden + """ + # Search for the requested variable in magic vars if the name + # of the variable starts with "__" and if we are inside a loop. + if self._magic_vars and var.startswith("__") and loop_name: + return self.magic_var(var, loop_pass[-1], loop_total[-1]) + + # Search for an ordinary variable or for a loop. + # Recursively search in self._vars for the requested variable. + scope = self._vars + globals = [] + for i in range(len(loop_name)): + # If global lookup is on then push the value on the stack. + if ((self._global_vars and global_override != "0") or \ + global_override == "1") and scope.has_key(var) and \ + self.is_ordinary_var(scope[var]): + globals.append(scope[var]) + + # Descent deeper into the hierarchy. + if scope.has_key(loop_name[i]) and scope[loop_name[i]]: + scope = scope[loop_name[i]][loop_pass[i]] + else: + return "" + + if scope.has_key(var): + # Value exists in current loop. + if type(scope[var]) == ListType: + # The requested value is a loop. + # Return total number of its passes. + return len(scope[var]) + else: + return scope[var] + elif globals and \ + ((self._global_vars and global_override != "0") or \ + global_override == "1"): + # Return globally looked up value. + return globals.pop() + else: + # No value found. + if var[0].isupper(): + # This is a loop name. + # Return zero, because the user wants to know number + # of its passes. + return 0 + else: + return "" + + def magic_var(self, var, loop_pass, loop_total): + """ Resolve and return value of a magic variable. + Raise an exception if the magic variable is not recognized. + + @hidden + """ + self.DEB("MAGIC: '%s', PASS: %d, TOTAL: %d"\ + % (var, loop_pass, loop_total)) + if var == "__FIRST__": + if loop_pass == 0: + return 1 + else: + return 0 + elif var == "__LAST__": + if loop_pass == loop_total - 1: + return 1 + else: + return 0 + elif var == "__INNER__": + # If this is neither the first nor the last pass. + if loop_pass != 0 and loop_pass != loop_total - 1: + return 1 + else: + return 0 + elif var == "__PASS__": + # Magic variable __PASS__ counts passes from one. + return loop_pass + 1 + elif var == "__PASSTOTAL__": + return loop_total + elif var == "__ODD__": + # Internally pass numbers stored in loop_pass are counted from + # zero. 
But the template language presents them counted from one. + # Therefore we must add one to the actual loop_pass value to get + # the value we present to the user. + if (loop_pass + 1) % 2 != 0: + return 1 + else: + return 0 + elif var.startswith("__EVERY__"): + # Magic variable __EVERY__x is never true in first or last pass. + if loop_pass != 0 and loop_pass != loop_total - 1: + # Check if an integer follows the variable name. + try: + every = int(var[9:]) # nine is length of "__EVERY__" + except ValueError: + raise TemplateError, "Magic variable __EVERY__x: "\ + "Invalid pass number." + else: + if not every: + raise TemplateError, "Magic variable __EVERY__x: "\ + "Pass number cannot be zero." + elif (loop_pass + 1) % every == 0: + self.DEB("MAGIC: EVERY: " + str(every)) + return 1 + else: + return 0 + else: + return 0 + else: + raise TemplateError, "Invalid magic variable '%s'." % var + + def escape(self, str, override=""): + """ Escape a string either by HTML escaping or by URL escaping. + @hidden + """ + ESCAPE_QUOTES = 1 + if (self._html_escape and override != "NONE" and override != "0" and \ + override != "URL") or override == "HTML" or override == "1": + return cgi.escape(str, ESCAPE_QUOTES) + elif override == "URL": + return urllib.quote_plus(str) + else: + return str + + def is_ordinary_var(self, var): + """ Return true if var is a scalar. (not a reference to loop) + @hidden + """ + if type(var) == StringType or type(var) == IntType or \ + type(var) == LongType or type(var) == FloatType: + return 1 + else: + return 0 + + +############################################## +# CLASS: TemplateCompiler # +############################################## + +class TemplateCompiler: + """ Preprocess, parse, tokenize and compile the template. + + This class parses the template and produces a 'compiled' form + of it. This compiled form is an instance of the Template + class. The compiled form is used as input for the TemplateProcessor + which uses it to actually process the template. + + This class should be used direcly only when you need to compile + a template from a string. If your template is in a file, then you + should use the TemplateManager class which provides + a higher level interface to this class and also can save the + compiled template to disk in a precompiled form. + """ + + def __init__(self, include=1, max_include=5, comments=1, gettext=0, + debug=0): + """ Constructor. + + @header __init__(include=1, max_include=5, comments=1, gettext=0, + debug=0) + + @param include Enable or disable included templates. + @param max_include Maximum depth of nested inclusions. + @param comments Enable or disable template comments. + @param gettext Enable or disable gettext support. + @param debug Enable or disable debugging messages. + """ + + self._include = include + self._max_include = max_include + self._comments = comments + self._gettext = gettext + self._debug = debug + + # This is a list of filenames of all included templates. + # It's modified by the include_templates() method. + self._include_files = [] + + # This is a counter of current inclusion depth. It's used to prevent + # infinite recursive includes. + self._include_level = 0 + + def compile(self, file): + """ Compile template from a file. + + @header compile(file) + @return Compiled template. + The return value is an instance of the Template + class. + + @param file Filename of the template. + See the prepare() method of the TemplateManager + class for exaplanation of this parameter. 
+        """
+
+        self.DEB("COMPILING FROM FILE: " + file)
+        self._include_path = os.path.join(os.path.dirname(file), INCLUDE_DIR)
+        tokens = self.parse(self.read(file))
+        compile_params = (self._include, self._max_include, self._comments,
+                          self._gettext)
+        return Template(__version__, file, self._include_files,
+                        tokens, compile_params, self._debug)
+
+    def compile_string(self, data):
+        """ Compile template from a string.
+
+            This method compiles a template from a string. The
+            template cannot include any templates.
+            TMPL_INCLUDE statements are turned into warnings.
+
+            @header compile_string(data)
+            @return Compiled template.
+            The return value is an instance of the Template
+            class.
+
+            @param data String containing the template data.
+        """
+        self.DEB("COMPILING FROM STRING")
+        self._include = 0
+        tokens = self.parse(data)
+        compile_params = (self._include, self._max_include, self._comments,
+                          self._gettext)
+        return Template(__version__, None, None, tokens, compile_params,
+                        self._debug)
+
+    ##############################################
+    #              PRIVATE METHODS               #
+    ##############################################
+
+    def DEB(self, str):
+        """ Print debugging message to stderr if debugging is enabled.
+            @hidden
+        """
+        if self._debug: print >> sys.stderr, str
+
+    def read(self, filename):
+        """ Read content of file and return it. Raise an error if a problem
+            occurs.
+            @hidden
+        """
+        self.DEB("READING: " + filename)
+        try:
+            f = None
+            try:
+                f = open(filename, "r")
+                data = f.read()
+            except IOError, (errno, errstr):
+                raise TemplateError, "IO error while reading template '%s': "\
+                                     "(%d) %s" % (filename, errno, errstr)
+            else:
+                return data
+        finally:
+            if f: f.close()
+
+    def parse(self, template_data):
+        """ Parse the template. This method is recursively called from
+            within the include_templates() method.
+
+            @return List of processing tokens.
+            @hidden
+        """
+        if self._comments:
+            self.DEB("PREPROCESS: COMMENTS")
+            template_data = self.remove_comments(template_data)
+        tokens = self.tokenize(template_data)
+        if self._include:
+            self.DEB("PREPROCESS: INCLUDES")
+            self.include_templates(tokens)
+        return tokens
+
+    def remove_comments(self, template_data):
+        """ Remove comments from the template data.
+            @hidden
+        """
+        pattern = r"### .*"
+        return re.sub(pattern, "", template_data)
+
+    def include_templates(self, tokens):
+        """ Process TMPL_INCLUDE statements. Use the include_level counter
+            to prevent infinite recursion. Record paths to all included
+            templates to self._include_files.
+            @hidden
+        """
+        i = 0
+        out = ""    # buffer for output
+        skip_params = 0
+
+        # Process the list of tokens.
+        while 1:
+            if i == len(tokens): break
+            if skip_params:
+                skip_params = 0
+                i += PARAMS_NUMBER
+                continue
+
+            token = tokens[i]
+            if token == "<TMPL_INCLUDE>":
+                filename = tokens[i + PARAM_NAME]
+                self._include_level += 1
+                if self._include_level > self._max_include:
+                    # Do not include the template.
+                    # Protection against infinite recursive includes.
+                    skip_params = 1
+                    self.DEB("INCLUDE: LIMIT REACHED: " + filename)
+                else:
+                    # Include the template.
+                    skip_params = 0
+                    include_file = os.path.join(self._include_path, filename)
+                    self._include_files.append(include_file)
+                    include_data = self.read(include_file)
+                    include_tokens = self.parse(include_data)
+
+                    # Append the tokens from the included template to actual
+                    # position in the tokens list, replacing the TMPL_INCLUDE
+                    # token and its parameters.
+                    tokens[i:i+PARAMS_NUMBER+1] = include_tokens
+                    i = i + len(include_tokens)
+                    self.DEB("INCLUDED: " + filename)
+                    continue    # Do not increment 'i' below.
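+            # Worked example of the splice above (my sketch, not from the
+            # original source): with PARAMS_NUMBER == 3, a token stream
+            #     ['<TMPL_INCLUDE>', 'nav.tmpl', None, None, '<p>rest</p>']
+            # has its first four entries replaced by the included template's
+            # tokens, and 'i' then jumps past the spliced-in tokens.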
+            i += 1
+            # end of the main while loop
+
+        if self._include_level > 0: self._include_level -= 1
+        return out
+
+    def tokenize(self, template_data):
+        """ Split the template into tokens separated by template statements.
+            The statements themselves and associated parameters are also
+            separately included in the resulting list of tokens.
+            Return list of the tokens.
+
+            @hidden
+        """
+        self.DEB("TOKENIZING TEMPLATE")
+        # NOTE: The TWO double quotes in character class in the regexp below
+        # are there only to prevent confusion of syntax highlighter in Emacs.
+        pattern = r"""
+            (?:^[ \t]+)?               # eat spaces, tabs (opt.)
+            (<
+             (?:!--[ ])?               # comment start + space (opt.)
+             /?TMPL_[A-Z]+             # closing slash / (opt.) + statement
+             [ a-zA-Z0-9""/.=:_\\-]*   # this spans also comments ending (--)
+             >)
+            [%s]?                      # eat trailing newline (opt.)
+        """ % os.linesep
+        rc = re.compile(pattern, re.VERBOSE | re.MULTILINE)
+        split = rc.split(template_data)
+        tokens = []
+        for statement in split:
+            if statement.startswith("<TMPL_") or \
+               statement.startswith("</TMPL_") or \
+               statement.startswith("<!-- TMPL_") or \
+               statement.startswith("<!-- /TMPL_"):
+                # Processing statement.
+                statement = self.strip_brackets(statement)
+                params = re.split(r"\s+", statement)
+                tokens.append("<%s>" % params[0])
+                tokens.append(self.find_name(params[1:]))
+                tokens.append(self.find_param("ESCAPE", params[1:]))
+                tokens.append(self.find_param("GLOBAL", params[1:]))
+            else:
+                # "Normal" template data.
+                if self._gettext:
+                    self.DEB("PARSING GETTEXT STATEMENTS")
+                    self.gettext_tokens(tokens, statement)
+                else:
+                    tokens.append(statement)
+        return tokens
+
+    def strip_brackets(self, statement):
+        """ Strip HTML brackets (with optional comments) from the
+            beginning and from the end of a statement.
+            @hidden
+        """
+        if statement.startswith("<!-- TMPL_") or \
+           statement.startswith("<!-- /TMPL_"):
+            return statement[5:-4].rstrip()
+        else:
+            return statement[1:-1].rstrip()
+
+    def find_name(self, params):
+        """ Find and return the name parameter of a statement: either the
+            implicit first parameter, or an explicit 'NAME' parameter.
+            @hidden
+        """
+        if len(params) > 0 and '=' not in params[0]:
+            # implicit identifier
+            name = params[0]
+            del params[0]
+        else:
+            # explicit identifier as a 'NAME' parameter
+            name = self.find_param("NAME", params)
+        self.DEB("TOKENIZER: NAME: " + str(name))
+        return name
+
+    def find_param(self, param, params):
+        """ Extract value of parameter from a statement.
+            @hidden
+        """
+        for pair in params:
+            name, value = pair.split("=")
+            if not name or not value:
+                raise TemplateError, "Syntax error in template."
+            if name == param:
+                if value[0] == '"':
+                    # The value is in double quotes.
+                    ret_value = value[1:-1]
+                else:
+                    # The value is without double quotes.
+                    ret_value = value
+                self.DEB("TOKENIZER: PARAM: '%s' => '%s'" % (param, ret_value))
+                return ret_value
+        else:
+            self.DEB("TOKENIZER: PARAM: '%s' => NOT DEFINED" % param)
+            return None
+
+
+##############################################
+#              CLASS: Template               #
+##############################################
+
+class Template:
+    """ This class represents a compiled template.
+
+        This class provides storage and methods for the compiled template
+        and associated metadata. It's serialized by pickle if we need to
+        save the compiled template to disk in a precompiled form.
+
+        You should never instantiate this class directly. Always use the
+        TemplateManager or TemplateCompiler classes to
+        create the instances of this class.
+
+        The only method which you can directly use is the is_uptodate
+        method.
+    """
+
+    def __init__(self, version, file, include_files, tokens, compile_params,
+                 debug=0):
+        """ Constructor.
+            @hidden
+        """
+        self._version = version
+        self._file = file
+        self._tokens = tokens
+        self._compile_params = compile_params
+        self._debug = debug
+        self._mtime = None
+        self._include_mtimes = {}
+
+        if not file:
+            self.DEB("TEMPLATE WAS COMPILED FROM A STRING")
+            return
+
+        # Save modification time of the main template file.
+        if os.path.isfile(file):
+            self._mtime = os.path.getmtime(file)
+        else:
+            raise TemplateError, "Template: file does not exist: '%s'" % file
+
+        # Save modification times of all included template files.
+        for inc_file in include_files:
+            if os.path.isfile(inc_file):
+                self._include_mtimes[inc_file] = os.path.getmtime(inc_file)
+            else:
+                raise TemplateError, "Template: file does not exist: '%s'"\
+                                     % inc_file
+
+        self.DEB("NEW TEMPLATE CREATED")
+
+    def is_uptodate(self, compile_params=None):
+        """ Check whether the compiled template is uptodate.
+
+            Return true if this compiled template is uptodate.
+            Return false, if the template source file was changed on the
+            disk since it was compiled.
+            Works by comparison of modification times.
+            Also takes modification times of all included templates
+            into account.
+
+            @header is_uptodate(compile_params=None)
+            @return True if the template is uptodate, false otherwise.
+
+            @param compile_params Only for internal use.
+            Do not use this optional parameter. It's intended only for
+            internal use by the TemplateManager.
+        """
+        if not self._file:
+            self.DEB("TEMPLATE COMPILED FROM A STRING")
+            return 0
+
+        if self._version != __version__:
+            self.DEB("TEMPLATE: VERSION NOT UPTODATE")
+            return 0
+
+        if compile_params != None and compile_params != self._compile_params:
+            self.DEB("TEMPLATE: DIFFERENT COMPILATION PARAMS")
+            return 0
+
+        # Check modification times of the main template and all included
+        # templates. If the included template no longer exists, then
+        # the problem will be resolved when the template is recompiled.
+
+        # Main template file.
+        if not (os.path.isfile(self._file) and \
+                self._mtime == os.path.getmtime(self._file)):
+            self.DEB("TEMPLATE: NOT UPTODATE: " + self._file)
+            return 0
+
+        # Included templates.
+        for inc_file in self._include_mtimes.keys():
+            if not (os.path.isfile(inc_file) and \
+                    self._include_mtimes[inc_file] == \
+                    os.path.getmtime(inc_file)):
+                self.DEB("TEMPLATE: NOT UPTODATE: " + inc_file)
+                return 0
+        else:
+            self.DEB("TEMPLATE: UPTODATE")
+            return 1
+
+    def tokens(self):
+        """ Get tokens of this template.
+            @hidden
+        """
+        return self._tokens
+
+    def file(self):
+        """ Get filename of the main file of this template.
+            @hidden
+        """
+        return self._file
+
+    def debug(self, debug):
+        """ Set debugging state.
+            @hidden
+        """
+        self._debug = debug
+
+    ##############################################
+    #              PRIVATE METHODS               #
+    ##############################################
+
+    def __getstate__(self):
+        """ Used by pickle when the class is serialized.
+            Remove the 'debug' attribute before serialization.
+            @hidden
+        """
+        dict = copy.copy(self.__dict__)
+        del dict["_debug"]
+        return dict
+
+    def __setstate__(self, dict):
+        """ Used by pickle when the class is unserialized.
+            Add the 'debug' attribute.
+            @hidden
+        """
+        dict["_debug"] = 0
+        self.__dict__ = dict
+
+    def DEB(self, str):
+        """ Print debugging message to stderr.
+            @hidden
+        """
+        if self._debug: print >> sys.stderr, str
+
+
+##############################################
+#                EXCEPTIONS                  #
+##############################################
+
+class TemplateError(Exception):
+    """ Fatal exception. Raised on runtime or template syntax errors.
+
+        This exception is raised when a runtime error occurs or when a syntax
+        error in the template is found. It has one parameter which always
+        is a string containing a description of the error.
+
+        All potential IOError exceptions are handled by the module and are
+        converted to TemplateError exceptions. That means you should catch the
+        TemplateError exception if there is a possibility that for example
+        the template file will not be accessible.
+
+        The exception can be raised by constructors or by any method of any
+        class.
+
+        The instance is no longer usable when this exception is raised.
+    """
+
+    def __init__(self, error):
+        """ Constructor.
+            @hidden
+        """
+        Exception.__init__(self, "Htmltmpl error: " + error)
+
+
+class PrecompiledError(Exception):
+    """ This exception is _PRIVATE_ and non fatal.
+        @hidden
+    """
+
+    def __init__(self, template):
+        """ Constructor.
+            @hidden
+        """
+        Exception.__init__(self, template)
+
diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/planet/sanitize.py b/DJAGEN/tags/djagen_old/djagen/gezegen/planet/sanitize.py
new file mode 100755
index 0000000..c98b14d
--- /dev/null
+++ b/DJAGEN/tags/djagen_old/djagen/gezegen/planet/sanitize.py
@@ -0,0 +1,354 @@
+"""
+sanitize: bringing sanity to the world of messed-up data
+"""
+
+__author__ = ["Mark Pilgrim ",
+              "Aaron Swartz "]
+__contributors__ = ["Sam Ruby "]
+__license__ = "BSD"
+__version__ = "0.25"
+
+_debug = 0
+
+# If you want sanitize to automatically run HTML markup through HTML Tidy, set
+# this to 1. Requires mxTidy
+# or utidylib.
+TIDY_MARKUP = 0
+
+# List of Python interfaces for HTML Tidy, in order of preference. Only useful
+# if TIDY_MARKUP = 1
+PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"]
+
+import sgmllib, re, sys    # sys is needed for the _debug traces below
+
+# chardet library auto-detects character encodings
+# Download from http://chardet.feedparser.org/
+try:
+    import chardet
+    if _debug:
+        import chardet.constants
+        chardet.constants._debug = 1
+
+    _chardet = lambda data: chardet.detect(data)['encoding']
+except:
+    chardet = None
+    _chardet = lambda data: None
+
+class _BaseHTMLProcessor(sgmllib.SGMLParser):
+    elements_no_end_tag = ['area', 'base', 'basefont', 'br', 'col', 'frame', 'hr',
+        'img', 'input', 'isindex', 'link', 'meta', 'param']
+
+    _r_barebang = re.compile(r'<!((?!DOCTYPE|--|\[))')
+    _r_bareamp = re.compile("&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)")
+    _r_shorttag = re.compile(r'<([^<\s]+?)\s*/>')
+
+    def __init__(self, encoding):
+        self.encoding = encoding
+        if _debug: sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding)
+        sgmllib.SGMLParser.__init__(self)
+
+    def reset(self):
+        self.pieces = []
+        sgmllib.SGMLParser.reset(self)
+
+    def _shorttag_replace(self, match):
+        tag = match.group(1)
+        if tag in self.elements_no_end_tag:
+            return '<' + tag + ' />'
+        else:
+            return '<' + tag + '></' + tag + '>'
+
+    def feed(self, data):
+        data = self._r_barebang.sub(r'&lt;!\1', data)
+        data = self._r_bareamp.sub("&amp;", data)
+        data = self._r_shorttag.sub(self._shorttag_replace, data)
+        if self.encoding and type(data) == type(u''):
+            data = data.encode(self.encoding)
+        sgmllib.SGMLParser.feed(self, data)
+
+    def normalize_attrs(self, attrs):
+        # utility method to be called by descendants
+        attrs = [(k.lower(), v) for k, v in attrs]
+        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
+        return attrs
+
+    def unknown_starttag(self, tag, attrs):
+        # called for each start tag
+        # attrs is a list of (attr, value) tuples
+        # e.g. for <pre class="screen">, tag='pre', attrs=[('class', 'screen')]
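+        # Worked example (my addition, not from the original source): with
+        # tag='pre' and attrs=[('class', 'screen')], the code below builds
+        # strattrs=' class="screen"' and appends '<pre class="screen">'
+        # to self.pieces.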
+        if _debug: sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag)
+        uattrs = []
+        # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
+        for key, value in attrs:
+            if type(value) != type(u''):
+                value = unicode(value, self.encoding)
+            uattrs.append((unicode(key, self.encoding), value))
+        strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs]).encode(self.encoding)
+        if tag in self.elements_no_end_tag:
+            self.pieces.append('<%(tag)s%(strattrs)s />' % locals())
+        else:
+            self.pieces.append('<%(tag)s%(strattrs)s>' % locals())
+
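+    # Round-trip sketch (my illustration, not part of the original module):
+    #     p = _BaseHTMLProcessor('utf8')
+    #     p.feed('<img src="a.png">hello')
+    #     p.output()  =>  '<img src="a.png" />hello'
+    # because 'img' is in elements_no_end_tag, unknown_starttag() re-emits
+    # it in self-closing form.
+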
+    def unknown_endtag(self, tag):
+        # called for each end tag, e.g. for </pre>, tag will be 'pre'
+        # Reconstruct the original end tag.
+        if tag not in self.elements_no_end_tag:
+            self.pieces.append("</%(tag)s>" % locals())
+
+    def handle_charref(self, ref):
+        # called for each character reference, e.g. for '&#160;', ref will be '160'
+        # Reconstruct the original character reference.
+        self.pieces.append('&#%(ref)s;' % locals())
+
+    def handle_entityref(self, ref):
+        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
+        # Reconstruct the original entity reference.
+        self.pieces.append('&%(ref)s;' % locals())
+
+    def handle_data(self, text):
+        # called for each block of plain text, i.e. outside of any tag and
+        # not containing any character or entity references
+        # Store the original text verbatim.
+        if _debug: sys.stderr.write('_BaseHTMLProcessor, handle_text, text=%s\n' % text)
+        self.pieces.append(text)
+
+    def handle_comment(self, text):
+        # called for each HTML comment, e.g. <!-- insert message here -->
+        # Reconstruct the original comment.
+        self.pieces.append('<!--%(text)s-->' % locals())
+
+    def handle_pi(self, text):
+        # called for each processing instruction, e.g. <?instruction>
+        # Reconstruct original processing instruction.
+        self.pieces.append('<?%(text)s>' % locals())
+
+    def handle_decl(self, text):
+        # called for the DOCTYPE, if present, e.g.
+        # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
+        #     "http://www.w3.org/TR/html4/loose.dtd">
+        # Reconstruct original DOCTYPE
+        self.pieces.append('<!%(text)s>' % locals())
+
+    _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
+    def _scan_name(self, i, declstartpos):
+        rawdata = self.rawdata
+        n = len(rawdata)
+        if i == n:
+            return None, -1
+        m = self._new_declname_match(rawdata, i)
+        if m:
+            s = m.group()
+            name = s.strip()
+            if (i + len(s)) == n:
+                return None, -1  # end of buffer
+            return name.lower(), m.end()
+        else:
+            self.handle_data(rawdata)
+#            self.updatepos(declstartpos, i)
+            return None, -1
+
+    def output(self):
+        '''Return processed HTML as a single string'''
+        return ''.join([str(p) for p in self.pieces])
+
+class _HTMLSanitizer(_BaseHTMLProcessor):
+    acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'b', 'big',
+        'blockquote', 'br', 'button', 'caption', 'center', 'cite', 'code', 'col',
+        'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt', 'em', 'fieldset',
+        'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input',
+        'ins', 'kbd', 'label', 'legend', 'li', 'map', 'menu', 'ol', 'optgroup',
+        'option', 'p', 'pre', 'q', 's', 'samp', 'select', 'small', 'span', 'strike',
+        'strong', 'sub', 'sup', 'table', 'textarea', 'tbody', 'td', 'tfoot', 'th',
+        'thead', 'tr', 'tt', 'u', 'ul', 'var']
+
+    acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
+        'action', 'align', 'alt', 'axis', 'border', 'cellpadding', 'cellspacing',
+        'char', 'charoff', 'charset', 'checked', 'cite', 'class', 'clear', 'cols',
+        'colspan', 'color', 'compact', 'coords', 'datetime', 'dir', 'disabled',
+        'enctype', 'for', 'frame', 'headers', 'height', 'href', 'hreflang', 'hspace',
+        'id', 'ismap', 'label', 'lang', 'longdesc', 'maxlength', 'media', 'method',
+        'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'prompt', 'readonly',
+        'rel', 'rev', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size',
+        'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title', 'type',
+        'usemap', 'valign', 'value', 'vspace', 'width']
+
+    ignorable_elements = ['script', 'applet', 'style']
+
+    def reset(self):
+        _BaseHTMLProcessor.reset(self)
+        self.tag_stack = []
+        self.ignore_level = 0
+
+    def feed(self, data):
+        _BaseHTMLProcessor.feed(self, data)
+        while self.tag_stack:
+            _BaseHTMLProcessor.unknown_endtag(self, self.tag_stack.pop())
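+    # Usage sketch (my illustration): the module-level HTML() helper below
+    # drives this class, e.g.
+    #     HTML('<script>evil()</script><b>hi</b>')  =>  '<b>hi</b>'
+    # 'script' is in ignorable_elements, 'b' is in acceptable_elements.
+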
+    def unknown_starttag(self, tag, attrs):
+        if tag in self.ignorable_elements:
+            self.ignore_level += 1
+            return
+
+        if self.ignore_level:
+            return
+
+        if tag in self.acceptable_elements:
+            attrs = self.normalize_attrs(attrs)
+            attrs = [(key, value) for key, value in attrs if key in self.acceptable_attributes]
+            if tag not in self.elements_no_end_tag:
+                self.tag_stack.append(tag)
+            _BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
+
+    def unknown_endtag(self, tag):
+        if tag in self.ignorable_elements:
+            self.ignore_level -= 1
+            return
+
+        if self.ignore_level:
+            return
+
+        if tag in self.acceptable_elements and tag not in self.elements_no_end_tag:
+            match = False
+            while self.tag_stack:
+                top = self.tag_stack.pop()
+                if top == tag:
+                    match = True
+                    break
+                _BaseHTMLProcessor.unknown_endtag(self, top)
+
+            if match:
+                _BaseHTMLProcessor.unknown_endtag(self, tag)
+
+    def handle_pi(self, text):
+        pass
+
+    def handle_decl(self, text):
+        pass
+
+    def handle_data(self, text):
+        if not self.ignore_level:
+            text = text.replace('<', '&lt;')
+            _BaseHTMLProcessor.handle_data(self, text)
+
+def HTML(htmlSource, encoding='utf8'):
+    p = _HTMLSanitizer(encoding)
+    p.feed(htmlSource)
+    data = p.output()
+    if TIDY_MARKUP:
+        # loop through list of preferred Tidy interfaces looking for one that's installed,
+        # then set up a common _tidy function to wrap the interface-specific API.
+        _tidy = None
+        for tidy_interface in PREFERRED_TIDY_INTERFACES:
+            try:
+                if tidy_interface == "uTidy":
+                    from tidy import parseString as _utidy
+                    def _tidy(data, **kwargs):
+                        return str(_utidy(data, **kwargs))
+                    break
+                elif tidy_interface == "mxTidy":
+                    from mx.Tidy import Tidy as _mxtidy
+                    def _tidy(data, **kwargs):
+                        nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs)
+                        return data
+                    break
+            except:
+                pass
+        if _tidy:
+            utf8 = type(data) == type(u'')
+            if utf8:
+                data = data.encode('utf-8')
+            data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8")
+            if utf8:
+                data = unicode(data, 'utf-8')
+            if data.count('<body'):
+                data = data.split('<body', 1)[1]
+                if data.count('>'):
+                    data = data.split('>', 1)[1]
+            if data.count('
' % self.url)
+
+    def test_changedurl(self):
+        # change the URL directly
+        self.channel.url = self.changed_url
+        self.assertEqual(self.channel.feed_information(),
+            "<%s> (formerly <%s>)" % (self.changed_url, self.url))
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/planet/tests/test_main.py b/DJAGEN/tags/djagen_old/djagen/gezegen/planet/tests/test_main.py
new file mode 100755
index 0000000..c2be62d
--- /dev/null
+++ b/DJAGEN/tags/djagen_old/djagen/gezegen/planet/tests/test_main.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python
+import os, sys, shutil, errno, unittest
+from ConfigParser import ConfigParser
+from StringIO import StringIO
+import planet
+
+class MainTest(unittest.TestCase):
+
+    def test_minimal(self):
+        configp = ConfigParser()
+        my_planet = planet.Planet(configp)
+        my_planet.run("Planet Name", "http://example.com", [])
+
+    def test_onefeed(self):
+        configp = ConfigParser()
+        configp.readfp(StringIO("""[http://www.example.com/]
+name = Mary
+"""))
+        my_planet = planet.Planet(configp)
+        my_planet.run("Planet Name", "http://example.com", [], True)
+
+    def test_generateall(self):
+        configp = ConfigParser()
+        configp.readfp(StringIO("""[http://www.example.com/]
+name = Mary
+"""))
+        my_planet = planet.Planet(configp)
+        my_planet.run("Planet Name", "http://example.com", [], True)
+        basedir = 
os.path.join(os.path.dirname(os.path.abspath(sys.modules[__name__].__file__)), 'data') + os.mkdir(self.output_dir) + t_file_names = ['simple', 'simple2'] + self._remove_cached_templates(basedir, t_file_names) + t_files = [os.path.join(basedir, t_file) + '.tmpl' for t_file in t_file_names] + my_planet.generate_all_files(t_files, "Planet Name", + 'http://example.com/', 'http://example.com/feed/', 'Mary', 'mary@example.com') + for file_name in t_file_names: + name = os.path.join(self.output_dir, file_name) + content = file(name).read() + self.assertEqual(content, 'Mary\n') + + def _remove_cached_templates(self, basedir, template_files): + """ + Remove the .tmplc files and force them to be rebuilt. + + This is required mainly so that the tests don't fail in mysterious ways in + directories that have been moved, eg 'branches/my-branch' to + 'branches/mysterious-branch' -- the .tmplc files seem to remember their full + path + """ + for file in template_files: + path = os.path.join(basedir, file + '.tmplc') + try: + os.remove(path) + except OSError, e: + # we don't care about the file not being there, we care about + # everything else + if e.errno != errno.ENOENT: + raise + + def setUp(self): + super(MainTest, self).setUp() + self.output_dir = 'output' + + def tearDown(self): + super(MainTest, self).tearDown() + shutil.rmtree(self.output_dir, ignore_errors = True) + shutil.rmtree('cache', ignore_errors = True) + +if __name__ == '__main__': + unittest.main() diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/planet/tests/test_sanitize.py b/DJAGEN/tags/djagen_old/djagen/gezegen/planet/tests/test_sanitize.py new file mode 100755 index 0000000..f0f1d42 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/gezegen/planet/tests/test_sanitize.py @@ -0,0 +1,125 @@ +# adapted from http://www.iamcal.com/publish/articles/php/processing_html_part_2/ +# and from http://feedparser.org/tests/wellformed/sanitize/ +# by Aaron Swartz, 2006, public domain + +import unittest, new +from planet import sanitize + +class SanitizeTest(unittest.TestCase): pass + +# each call to HTML adds a test case to SanitizeTest +testcases = 0 +def HTML(a, b): + global testcases + testcases += 1 + func = lambda self: self.assertEqual(sanitize.HTML(a), b) + method = new.instancemethod(func, None, SanitizeTest) + setattr(SanitizeTest, "test_%d" % testcases, method) + +## basics +HTML("","") +HTML("hello","hello") + +## balancing tags +HTML("hello","hello") +HTML("hello","hello") +HTML("hello","hello") +HTML("hello","hello") +HTML("hello","hello") +HTML("","") + +## trailing slashes +HTML('','') +HTML('','') +HTML('','') + +## balancing angle brakets +HTML('','b>') +HTML('','>') +HTML('foofoo','b>foo') +HTML('>') +HTML('b><','b>') +HTML('>','>') + +## attributes +HTML('','') +HTML('','') +HTML('','') + +## dangerous tags (a small sample) +sHTML = lambda x: HTML(x, 'safe description') +sHTML('safe description') +sHTML('safe description') +sHTML('safe description') +sHTML('safe description') +sHTML('safe description') +sHTML('safe') +sHTML('safe description') +sHTML('safe description') +sHTML('safe description') +sHTML('safe description') + +for x in ['onabort', 'onblur', 'onchange', 'onclick', 'ondblclick', 'onerror', 'onfocus', 'onkeydown', 'onkeypress', 'onkeyup', 'onload', 'onmousedown', 'onmouseout', 'onmouseover', 'onmouseup', 'onreset', 'resize', 'onsubmit', 'onunload']: + HTML('' % x, + '') + +HTML('never trust your upstream platypus', 'never trust your upstream platypus') + +## ignorables +HTML('foo', 'foo') + +## non-allowed tags 
+HTML('','') +HTML('\r\n\r\n\r\n\r\n\r\nfunction executeMe()\r\n{\r\n\r\n\r\n\r\n\r\n/* + + + + + + + + + + + +
+ + +
+ + +
+

16 March 2010

+ +
+ + +
+
+
+


+
+Seminar Winds at Yakın Doğu
+

+
+
+
+

Since last week we have been holding Linux and Free Software seminars at Yakın Doğu Üniversitesi. Our first seminar was "What is Linux?". It was one of the most entertaining "What is Linux?" talks I have heard in a long time. Even though it ran three and a half hours, speaker Ali Erdinç Köroğlu's amusing delivery made the time flow like water.

+

Back when I first arrived at Yakın Doğu, I gave a "What is Linux?" seminar myself. The turnout in those days looked like the picture below.

+

+

Although the audience really should have consisted mostly of teachers, it was made up of students. In a talk of an hour and a half I explained to the attendees what Linux is. Not a single question came during the seminar, so when people came up afterwards to ask some, I was quite pleased.

+

+

This time around, besides the number of attendees being nothing to sneeze at, I think a more engaged crowd is turning up.

+

+

It must also be said that Ali Erdinç has quite an entertaining style, prodding the audience now and then to hold their attention.

+

+

This seminar series will run until May. For those interested, it can be followed on the university announcements page, Facebook and Twitter. And for those in Cyprus, let us add that we also hold film screenings at the university every Friday evening and on Saturdays. We have fun watching; do come along.

+

Spring, one might say, has come to Lefkoşa. Oğuz Yarımtepe reported from Cyprus.

+

+
+
+
+ + + + + + + + + +
+
+ +
+
+
+

05 February 2010

+ +
+ + +
+
+
+

+ +100 ml +

+
+
+
+

A liquid restriction has applied on flights since 1 January 2010. Liquids, gels, creams and the like over 100 ml are confiscated from carry-on bags. It became something I looked into after an incident on my way back to Cyprus over New Year's. There was a 150 ml Dove cream in my backpack; I had been using it for quite a while. Around 5:30 we were waiting at the final checkpoint before boarding. When security waved us through, I put my bag on the scanner and walked through. The officer at the machine called out that there was a liquid in this bag. In the front pocket, he said. Yes, there's a cream, I said. He opened it and looked: a 150 ml cream. We can't let anything over 100 ml through, he said. Half asleep, I said it's a used tube, what's left inside is probably under 100 ml by now. The officer smiled; he was only doing his job. The woman standing by took the tube sternly, looked at it, and said that for them what matters is its stated volume. Frankly, there was no point arguing; they were enforcing a rule. As instructed, I threw the cream I had paid good money for into the bin with my own hands.

+

Now for the parts of the incident that strike me as odd:

+

* The security officer decided how many ml the tube held by looking at its label. So next time, purely to test the system, if I buy the same cream and change the 150 on it to 100, it will probably get through.

+

* The officer never even opened it to look inside. He asked me what was in it. So even if I had filled it with that swine flu that has long since left the headlines, he would have been none the wiser.

+

* I binned it with my own hands, and that stung.

+

So I sent an e-mail to the Ulaştırma Bakanlığı. My message was forwarded to the Sivil Havacılık Dairesi, and a reply came. In short, the 100 ml rule entered the civil aviation agenda after the terrorist plot uncovered in England on 10 August 2006. With regulation 1546/2006, issued on 6 November 2006, it came into force in all EU member states as well as Switzerland, Iceland and Norway, and in the USA and Canada. Turkey, being an ECAC member state, followed the recommendation and began applying the rule just as the other states do. And why 100 ml? Research, tests and risk assessments by the Explosives Working Group within the United Nations concluded that liquids carried in 100 ml containers inside a 1-litre bag (about 6 such containers fit in a 1-litre bag) do not endanger flight safety. The scientists studied it and this is what they found; I can't argue with that. So how do they know it is 100 ml? Simple: they either ask you or read what is printed on the container. Meaning that if I wanted to play terrorist, I would take 200 ml of liquid explosive, put it in a Dove cream tube, neatly redo the label to say 100, answer "100" if asked, and walk through. Then why do we insist on exactly 100? Well, because that's how it's done in the other countries. Consider these lines from the e-mail:

+

“Our country is obliged to implement the decisions taken and the standards set by the international organizations named above.”

+

I don't know to what standard this rule is enforced in other countries. Maybe the strangeness is peculiar to Cyprus flights. Or maybe the concept called a "standard" is simply understood as being attached to the number 100.

+

So take my advice: when boarding a plane, carry no container in your hand luggage labelled with anything over 100 ml. Full or empty makes no difference.

+
+
+
+ + + + + + + + + +
+
+ +
+
+
+

29 January 2010

+ +
+ + +
+
+
+


+
+Sun is no more!
+

+
+
+
+

It is getting lost among the iPsunoraclead headlines, but Oracle has completed its long-running acquisition of Sun. Now www.sun.com redirects you straight to Oracle's site.

+

As for the question that interests me most, whether Sun's free software projects will be continued, the news so far is positive. Among all these projects, the only one announced as discontinued for now is Kenai.

+

I hope it turns out to be a happy ending for all of us…

+

P.S.: Following a pointer from Kültür Mantarı, I saw James Gosling's blog post on the subject, thought the image there deserved to be preserved here as well, and copied it over…

+

sunrip


+
+
+ + + + + + +
+
+ +
+
+
+

24 December 2009

+ +
+ + +
+
+
+


+
+EMO 13th National Congress
+

+
+
+
+

As part of EMO's 13th National Congress, taking place 23-26 December at ODTÜ, I am giving two talks in the Free Software special session on Friday 25 December: "The Effect of Free Software on the Application Development Model: What We Learned from Tekir" at 9:30-11:15, and "The Economic and Social Aspects of Free Software" at 11:30-12:30.

+

The event has a generally packed programme and will also include various LKD seminars. Do come!


+
+
+ + + + + + +
+
+ +
+
+
+

24 September 2009

+ +
+ + +
+
+
+

+ +Intel, Atom, Moblin +

+
+
+
+

Intel wants its Atom processors to take a place in every corner of life. Being x86-based, Atom lets programmers run applications written for ordinary computers on mobile devices without much modification, and that gives Intel a significant advantage. To push the advantage further, they rolled up their sleeves on an operating system to lift performance on these devices, and began developing Moblin. Yesterday Intel made three important announcements on these fronts…

+

They launched a new developer programme to improve application performance on Atom-based devices, and started a competition to promote the Atom Developer Program. Worth a look, I'd say… ( I am going to sign up :) )

+

The second and third announcements arrived together: Moblin's new version 2.1 was released and shown off on an Atom-based smartphone. At a stroke, Intel became a rival to a whole crowd of companies :) As I wrote the other day, I expect plenty of interesting developments in the mobile world over the coming year. I hope free software, and we users, come out of this competition ahead…


+
+
+ + + + + + +
+
+ +
+
+
+

25 August 2009

+ +
+ + +
+
+
+


+
+Tech Support Cheat Sheet
+

+
+
+
+

I liked the tech support cheat sheet published on xkcd the other day so much that I decided to make a Turkish version of it.

+

teknikdestek
+For those who want it, the ODF version is here as well


+
+
+ + + + + + +
+
+ +
+
+
+

18 August 2009

+ +
+ + +
+
+
+


+
+Not a “Pirate” but an “Idea Thief”
+

+
+
+
+

The Ministry of Culture and Tourism has begun work on amending the Law on Intellectual and Artistic Works so that those who download music, films, books and the like over the Internet are punished as well. To carry out this monitoring, they will be working together with the internet service providers.

+

While the fellows selling books, CDs and DVDs from stalls on every street corner carry on without any trouble (to my knowledge the legal arrangements against that already exist), rather than fight them, they will inspect the traffic flowing over the internet. During that inspection it will make no difference whether you are downloading music or an e-mail from your sweetheart: they will be listening. They will also, supposedly, tell at a glance whether the music you downloaded is legal or not. And, unless I misread the news, officers will even be paid a bonus per track found on your machine during a raid :) In short, Big Brother is looking for new means to watch, and to ban whenever he pleases…

+

None of this should suggest I have no respect for intellectual property rights; on the contrary, I am firmly against piracy and idea theft. What I am even more against is using them as a pretext to violate private communication.

+

Finally, one more piece of news: Pirate Bay's 23 GB archive has also been opened for sharing. There may be illegal material in that archive, but there are also many works shared legally. Download the legal ones :) Use free software, not pirated software!


+
+
+ + + + + + +
+
+ +
+
+
+

07 July 2009

+ +
+ + +
+
+
+


+
+What's Happening in the World of Mobile Devices?
+

+
+
+
+

moblin
The world of mobile devices has been astir for a while now. With the iPhone, Apple caused a serious shake-up in the mobile phone world, emerging as a formidable rival to the sector's leading firms such as Palm, Nokia, Sony-Ericsson and BlackBerry; then Google drew all eyes with Android, a platform that promised much even before there was enough hardware to support it. Nokia, which kept defending Symbian against Android, WebOS and iPhone OS, bought back the shares it did not yet own, founded a foundation, and handed Symbian over to it as open source.

+

Just at that moment Intel, with its Atom processor, made it possible to build PCs with low resource demands, and over the past year NetBooks joined the ranks of popular devices.

+

This year Intel began investing seriously in the Mobile Internet Device (MID). It even started a dedicated Linux distribution to increase these devices' appeal: Moblin.

+

For Moblin support Intel first signed with Canonical. Canonical later announced that, as its NetBook distribution, it would back Maemo, which Nokia builds for its own tablets. Intel in turn announced it was handing Moblin to the Linux Foundation, with Novell as support partner. Two weeks ago a Nokia-Intel agreement was announced without details. The common reading was that Nokia would use Intel technology to build more capable phones, and that the pair would pick between Moblin and Maemo and join forces. Today Nokia announced it will not make Android-based phones and that Maemo, until now GTK+ based, is being ported to Qt.

+

And this is exactly where my questions begin. Will Canonical keep supporting a Qt-based Maemo? Will Nokia build Intel-powered MIDs running Maemo, or Intel-powered phones that remake Symbian into something as capable as its rivals? What is Intel planning for MIDs? Do those plans include continuing to back Moblin, or will it invest in Maemo together with Nokia? Will Android, now showing up on NetBooks too, be an alternative for this upcoming hardware?

+

Most important of all: will what finally comes out of all this be cheap, capable devices for us consumers, or one more pile of mutually incompatible toys?


+
+
+ + + + + + +
+
+ +
+
+
+

17 June 2009

+ +
+ + +
+
+
+


+
+To Ankara for the LKD General Assembly
+

+
+
+
+

This weekend we are heading to Ankara for the LKD General Assembly. The text below is from Volkan, who tried to organize the trip…

+

***

+

Had I been going to Baghdad instead of Ankara, I could scarcely have run around more,

+

TCDD: the mighty institution that runs the most high-tech YHT and gets you to
+Ankara in 5 hours 28 minutes.
+Yes, unfortunately this institution does not want to sell tickets.

+

1- The web site is addicted to Windows and Internet Explorer. First of all you
+need to own such a system. (MAC and Linux users have no place in the rail
+passenger portfolio. They must be plane or bus lovers!)

+

2- The ticket sales application on the web site is merely a cousin of a bank
+queue-number machine. It deals out the next vacant seat in order. In the
+Pullman cars the first 6 seats face each other and the last 3 cannot recline.
+Guess which seats it deals out first? Yes, you guessed it. You cannot choose a
+different seat or car. What you can select is "next to a lady" and "internet",
+and how much real choice even those allow is doubtful. (I asked for internet;
+it said nothing available.)

+

3- PTT branches are announced as agencies that will sell train tickets. You go
+there, and… Yes, we sell them, they say, if we can get into the site. And what
+happened? Of course they couldn't. Thank you for waiting in line for 10 minutes.

+

4- Agencies: those who sell TCDD tickets for a commission. We go to one and I
+ask: I need tickets, can you sell them? Of course, come in, they say. I want a
+round trip, one full fare and one student. First the clerk says
+- I don't issue round-trip tickets here!
+- Meaning what?
+- There's no difference anyway, I'll issue them separately. Is the price even different?
+Another colleague corrects him: you can, if it's the same express.
+- Of course I'd buy round trip, why else, there's a discount, I say.
+Anyway, he goes in and tries; I ask which seat numbers came up.
+- 4 and 5, he says. (The vacant ones among those first six seats)
+- Can't you change them?
+- Unfortunately not.
+- You're using the internet version, aren't you, I can't help asking.
+- No, we log in as an agency, but it makes no difference, comes the reply.
+(Presumably an extra commission is simply added on top.)
+- So who will let me choose a seat?
+- You can get them at the station, Haydarpaşa or Sirkeci.

+

5- Our route: Sirkeci station. I get there by one bus and a tram.
+Once bitten, my first question: I want a ticket for the Fatih express, but can
+you choose the seat?
+- Let's see, if there are free places we can choose, says the clerk this time.
+- Ohh, at last.
+- I want a round trip, 1 full and 1 student, plus 1 student one-way.
+- There is no round-trip discount for students, comes the reply.
+- I know, the full fare is exactly why I mention it. (Info: a full-fare round
+trip costs the same as a student ticket, strange. If you buy a round trip your
+student status is useless, so no need for the ID. Observation: students always
+travel one way.)
+- Credit card or cash?
+- DING! credit card.. you do take it, right?
+- Yes, 112 TL
+- Here you are; zip zap, two clicks and a bit of chat, tickets and POS slip in hand.

+

Before leaving the counter I check the tickets: train, date, seats, all
+correct. Tickets in hand, thanking him and walking off, I have managed one
+purchase in a mere hour and a half. And the return leg is still to arrange.

+

In short,
+Outbound : 18/06/2009 Thursday 23:30 Haydarpaşa Car: X Seat: XX-XX-XX
+Return : 20/06/2009 Saturday 23:30 Ankara Car: X Seat: XX-XX

+

Safe travels.

+

=====================
+Footnote 1: Besides car 1 where I got these seats, 2 more cars still look
+completely empty. Cars 2-3 have some seats sold.

+

Footnote 2: Never having got used to doing business by phone, I did not chase
+up whether reservations or sales are possible that way. A different adventure
+may be waiting for you there, who knows?

+

Footnote 3: Do you suppose you get to choose an upper or lower berth in the sleeper cars?


+
+
+ + + + + + +
+
+ +
+
+
+

16 June 2009

+ +
+ + +
+
+
+


+
+IE, WTW and Food Aid
+

+
+
+
+

wfp-wtw
These days world hunger, food aid and the news about them interest me more than ever, so Microsoft's new campaign caught my eye. To better publicize the new version of its Internet browser, Microsoft has built a campaign around food aid: for every complete download of IE8 it will donate 8 meals. You can reach the details here…

+

Naturally the campaign set off plenty of debate; TechCrunch, for one, has a pile of posts and arguments about it. For my part I couldn't decide whether to download a browser that doesn't even run on Linux, spend some bandwidth and trigger a donation; to encourage existing IE users to move from the broken old versions to this new one, which fixes a pile of CSS and JS bugs; or to keep quiet altogether. In the end I took the news as an excuse to write at greater length.

+

Whether or not you download IE8, visit the web sites of the organizations below and learn what you can do to join the fight against hunger and poverty in the world… Among them, I especially recommend a look at the United Nations World Food Programme's Walk The Web campaign…

+ +

Finally, as I keep telling everyone these days, I strongly recommend watching the documentary Home (Yuva).


+
+
+ + + + + + +
+
+ +
+
+
+

28 May 2009

+ +
+ + +
+
+
+


+
+Free Software Panel at the TBD Informatics Congress
+

+
+
+
+

At the 3rd İstanbul Informatics Congress, organized by TBD this year, there will be a Free Software Panel on Sunday at 14:00. The panel will focus on free software and business models. To all who may be interested…

+

Venue: Marmara Üniversitesi Nişantaşı Campus
+Erdal İnönü Science and Culture Center
+Date: Sunday 31 May, 14:00 - 15:20
+Session chair: Görkem Çetin
+Speakers: Enver Altın, Hakan Uygun, Cahit Cengizhan


+
+
+ + + + + + +
+
+ +
+
+
+

13 April 2009

+ +
+ + +
+
+
+


+
+Sorting Algorithms
+

+
+
+
+

Sorting algorithms are among the most fundamental things in an introduction to programming. Above all they are splendid examples for seeing how different methods for the same problem yield different results. Better still is a visual comparison of those different algorithms. I strongly recommend a look at this site, which does exactly that, and does it well. You will find not only a visual side-by-side of the algorithms, but also each algorithm's behaviour on different kinds of input data, with detailed comparisons…


+
+
+ + + + + + +
+
+ +
+
+
+
+ + + + + + + + + + + + + + diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/www/jquery.cookie.min.js b/DJAGEN/tags/djagen_old/djagen/gezegen/www/jquery.cookie.min.js new file mode 100755 index 0000000..aab4864 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/gezegen/www/jquery.cookie.min.js @@ -0,0 +1 @@ +jQuery.cookie=function(B,I,L){if(typeof I!="undefined"){L=L||{};if(I===null){I="";L.expires=-1}var E="";if(L.expires&&(typeof L.expires=="number"||L.expires.toUTCString)){var F;if(typeof L.expires=="number"){F=new Date();F.setTime(F.getTime()+(L.expires*24*60*60*1000))}else{F=L.expires}E="; expires="+F.toUTCString()}var K=L.path?"; path="+(L.path):"";var G=L.domain?"; domain="+(L.domain):"";var A=L.secure?"; secure":"";document.cookie=[B,"=",encodeURIComponent(I),E,K,G,A].join("")}else{var D=null;if(document.cookie&&document.cookie!=""){var J=document.cookie.split(";");for(var H=0;H)[^>]*$|^#(\w+)$/,isSimple=/^.[^:#\[\.]*$/,undefined;jQuery.fn=jQuery.prototype={init:function(selector,context){selector=selector||document;if(selector.nodeType){this[0]=selector;this.length=1;return this;}if(typeof selector=="string"){var match=quickExpr.exec(selector);if(match&&(match[1]||!context)){if(match[1])selector=jQuery.clean([match[1]],context);else{var elem=document.getElementById(match[3]);if(elem){if(elem.id!=match[3])return jQuery().find(selector);return jQuery(elem);}selector=[];}}else +return jQuery(context).find(selector);}else if(jQuery.isFunction(selector))return jQuery(document)[jQuery.fn.ready?"ready":"load"](selector);return this.setArray(jQuery.makeArray(selector));},jquery:"1.2.6",size:function(){return this.length;},length:0,get:function(num){return num==undefined?jQuery.makeArray(this):this[num];},pushStack:function(elems){var ret=jQuery(elems);ret.prevObject=this;return ret;},setArray:function(elems){this.length=0;Array.prototype.push.apply(this,elems);return this;},each:function(callback,args){return jQuery.each(this,callback,args);},index:function(elem){var ret=-1;return jQuery.inArray(elem&&elem.jquery?elem[0]:elem,this);},attr:function(name,value,type){var options=name;if(name.constructor==String)if(value===undefined)return this[0]&&jQuery[type||"attr"](this[0],name);else{options={};options[name]=value;}return this.each(function(i){for(name in options)jQuery.attr(type?this.style:this,name,jQuery.prop(this,options[name],type,i,name));});},css:function(key,value){if((key=='width'||key=='height')&&parseFloat(value)<0)value=undefined;return this.attr(key,value,"curCSS");},text:function(text){if(typeof text!="object"&&text!=null)return this.empty().append((this[0]&&this[0].ownerDocument||document).createTextNode(text));var ret="";jQuery.each(text||this,function(){jQuery.each(this.childNodes,function(){if(this.nodeType!=8)ret+=this.nodeType!=1?this.nodeValue:jQuery.fn.text([this]);});});return ret;},wrapAll:function(html){if(this[0])jQuery(html,this[0].ownerDocument).clone().insertBefore(this[0]).map(function(){var elem=this;while(elem.firstChild)elem=elem.firstChild;return elem;}).append(this);return this;},wrapInner:function(html){return this.each(function(){jQuery(this).contents().wrapAll(html);});},wrap:function(html){return this.each(function(){jQuery(this).wrapAll(html);});},append:function(){return this.domManip(arguments,true,false,function(elem){if(this.nodeType==1)this.appendChild(elem);});},prepend:function(){return this.domManip(arguments,true,true,function(elem){if(this.nodeType==1)this.insertBefore(elem,this.firstChild);});},before:function(){return 
this.domManip(arguments,false,false,function(elem){this.parentNode.insertBefore(elem,this);});},after:function(){return this.domManip(arguments,false,true,function(elem){this.parentNode.insertBefore(elem,this.nextSibling);});},end:function(){return this.prevObject||jQuery([]);},find:function(selector){var elems=jQuery.map(this,function(elem){return jQuery.find(selector,elem);});return this.pushStack(/[^+>] [^+>]/.test(selector)||selector.indexOf("..")>-1?jQuery.unique(elems):elems);},clone:function(events){var ret=this.map(function(){if(jQuery.browser.msie&&!jQuery.isXMLDoc(this)){var clone=this.cloneNode(true),container=document.createElement("div");container.appendChild(clone);return jQuery.clean([container.innerHTML])[0];}else +return this.cloneNode(true);});var clone=ret.find("*").andSelf().each(function(){if(this[expando]!=undefined)this[expando]=null;});if(events===true)this.find("*").andSelf().each(function(i){if(this.nodeType==3)return;var events=jQuery.data(this,"events");for(var type in events)for(var handler in events[type])jQuery.event.add(clone[i],type,events[type][handler],events[type][handler].data);});return ret;},filter:function(selector){return this.pushStack(jQuery.isFunction(selector)&&jQuery.grep(this,function(elem,i){return selector.call(elem,i);})||jQuery.multiFilter(selector,this));},not:function(selector){if(selector.constructor==String)if(isSimple.test(selector))return this.pushStack(jQuery.multiFilter(selector,this,true));else +selector=jQuery.multiFilter(selector,this);var isArrayLike=selector.length&&selector[selector.length-1]!==undefined&&!selector.nodeType;return this.filter(function(){return isArrayLike?jQuery.inArray(this,selector)<0:this!=selector;});},add:function(selector){return this.pushStack(jQuery.unique(jQuery.merge(this.get(),typeof selector=='string'?jQuery(selector):jQuery.makeArray(selector))));},is:function(selector){return!!selector&&jQuery.multiFilter(selector,this).length>0;},hasClass:function(selector){return this.is("."+selector);},val:function(value){if(value==undefined){if(this.length){var elem=this[0];if(jQuery.nodeName(elem,"select")){var index=elem.selectedIndex,values=[],options=elem.options,one=elem.type=="select-one";if(index<0)return null;for(var i=one?index:0,max=one?index+1:options.length;i=0||jQuery.inArray(this.name,value)>=0);else if(jQuery.nodeName(this,"select")){var values=jQuery.makeArray(value);jQuery("option",this).each(function(){this.selected=(jQuery.inArray(this.value,values)>=0||jQuery.inArray(this.text,values)>=0);});if(!values.length)this.selectedIndex=-1;}else +this.value=value;});},html:function(value){return value==undefined?(this[0]?this[0].innerHTML:null):this.empty().append(value);},replaceWith:function(value){return this.after(value).remove();},eq:function(i){return this.slice(i,i+1);},slice:function(){return this.pushStack(Array.prototype.slice.apply(this,arguments));},map:function(callback){return this.pushStack(jQuery.map(this,function(elem,i){return callback.call(elem,i,elem);}));},andSelf:function(){return this.add(this.prevObject);},data:function(key,value){var parts=key.split(".");parts[1]=parts[1]?"."+parts[1]:"";if(value===undefined){var data=this.triggerHandler("getData"+parts[1]+"!",[parts[0]]);if(data===undefined&&this.length)data=jQuery.data(this[0],key);return data===undefined&&parts[1]?this.data(parts[0]):data;}else +return this.trigger("setData"+parts[1]+"!",[parts[0],value]).each(function(){jQuery.data(this,key,value);});},removeData:function(key){return 
this.each(function(){jQuery.removeData(this,key);});},domManip:function(args,table,reverse,callback){var clone=this.length>1,elems;return this.each(function(){if(!elems){elems=jQuery.clean(args,this.ownerDocument);if(reverse)elems.reverse();}var obj=this;if(table&&jQuery.nodeName(this,"table")&&jQuery.nodeName(elems[0],"tr"))obj=this.getElementsByTagName("tbody")[0]||this.appendChild(this.ownerDocument.createElement("tbody"));var scripts=jQuery([]);jQuery.each(elems,function(){var elem=clone?jQuery(this).clone(true)[0]:this;if(jQuery.nodeName(elem,"script"))scripts=scripts.add(elem);else{if(elem.nodeType==1)scripts=scripts.add(jQuery("script",elem).remove());callback.call(obj,elem);}});scripts.each(evalScript);});}};jQuery.fn.init.prototype=jQuery.fn;function evalScript(i,elem){if(elem.src)jQuery.ajax({url:elem.src,async:false,dataType:"script"});else +jQuery.globalEval(elem.text||elem.textContent||elem.innerHTML||"");if(elem.parentNode)elem.parentNode.removeChild(elem);}function now(){return+new Date;}jQuery.extend=jQuery.fn.extend=function(){var target=arguments[0]||{},i=1,length=arguments.length,deep=false,options;if(target.constructor==Boolean){deep=target;target=arguments[1]||{};i=2;}if(typeof target!="object"&&typeof target!="function")target={};if(length==i){target=this;--i;}for(;i-1;}},swap:function(elem,options,callback){var old={};for(var name in options){old[name]=elem.style[name];elem.style[name]=options[name];}callback.call(elem);for(var name in options)elem.style[name]=old[name];},css:function(elem,name,force){if(name=="width"||name=="height"){var val,props={position:"absolute",visibility:"hidden",display:"block"},which=name=="width"?["Left","Right"]:["Top","Bottom"];function getWH(){val=name=="width"?elem.offsetWidth:elem.offsetHeight;var padding=0,border=0;jQuery.each(which,function(){padding+=parseFloat(jQuery.curCSS(elem,"padding"+this,true))||0;border+=parseFloat(jQuery.curCSS(elem,"border"+this+"Width",true))||0;});val-=Math.round(padding+border);}if(jQuery(elem).is(":visible"))getWH();else +jQuery.swap(elem,props,getWH);return Math.max(0,val);}return jQuery.curCSS(elem,name,force);},curCSS:function(elem,name,force){var ret,style=elem.style;function color(elem){if(!jQuery.browser.safari)return false;var ret=defaultView.getComputedStyle(elem,null);return!ret||ret.getPropertyValue("color")=="";}if(name=="opacity"&&jQuery.browser.msie){ret=jQuery.attr(style,"opacity");return ret==""?"1":ret;}if(jQuery.browser.opera&&name=="display"){var save=style.outline;style.outline="0 solid black";style.outline=save;}if(name.match(/float/i))name=styleFloat;if(!force&&style&&style[name])ret=style[name];else if(defaultView.getComputedStyle){if(name.match(/float/i))name="float";name=name.replace(/([A-Z])/g,"-$1").toLowerCase();var computedStyle=defaultView.getComputedStyle(elem,null);if(computedStyle&&!color(elem))ret=computedStyle.getPropertyValue(name);else{var swap=[],stack=[],a=elem,i=0;for(;a&&color(a);a=a.parentNode)stack.unshift(a);for(;i]*?)\/>/g,function(all,front,tag){return tag.match(/^(abbr|br|col|img|input|link|meta|param|hr|area|embed)$/i)?all:front+">";});var tags=jQuery.trim(elem).toLowerCase(),div=context.createElement("div");var wrap=!tags.indexOf("",""]||!tags.indexOf("",""]||tags.match(/^<(thead|tbody|tfoot|colg|cap)/)&&[1,"","
"]||!tags.indexOf("",""]||(!tags.indexOf("",""]||!tags.indexOf("",""]||jQuery.browser.msie&&[1,"div
","
"]||[0,"",""];div.innerHTML=wrap[1]+elem+wrap[2];while(wrap[0]--)div=div.lastChild;if(jQuery.browser.msie){var tbody=!tags.indexOf(""&&tags.indexOf("=0;--j)if(jQuery.nodeName(tbody[j],"tbody")&&!tbody[j].childNodes.length)tbody[j].parentNode.removeChild(tbody[j]);if(/^\s/.test(elem))div.insertBefore(context.createTextNode(elem.match(/^\s*/)[0]),div.firstChild);}elem=jQuery.makeArray(div.childNodes);}if(elem.length===0&&(!jQuery.nodeName(elem,"form")&&!jQuery.nodeName(elem,"select")))return;if(elem[0]==undefined||jQuery.nodeName(elem,"form")||elem.options)ret.push(elem);else +ret=jQuery.merge(ret,elem);});return ret;},attr:function(elem,name,value){if(!elem||elem.nodeType==3||elem.nodeType==8)return undefined;var notxml=!jQuery.isXMLDoc(elem),set=value!==undefined,msie=jQuery.browser.msie;name=notxml&&jQuery.props[name]||name;if(elem.tagName){var special=/href|src|style/.test(name);if(name=="selected"&&jQuery.browser.safari)elem.parentNode.selectedIndex;if(name in elem&¬xml&&!special){if(set){if(name=="type"&&jQuery.nodeName(elem,"input")&&elem.parentNode)throw"type property can't be changed";elem[name]=value;}if(jQuery.nodeName(elem,"form")&&elem.getAttributeNode(name))return elem.getAttributeNode(name).nodeValue;return elem[name];}if(msie&¬xml&&name=="style")return jQuery.attr(elem.style,"cssText",value);if(set)elem.setAttribute(name,""+value);var attr=msie&¬xml&&special?elem.getAttribute(name,2):elem.getAttribute(name);return attr===null?undefined:attr;}if(msie&&name=="opacity"){if(set){elem.zoom=1;elem.filter=(elem.filter||"").replace(/alpha\([^)]*\)/,"")+(parseInt(value)+''=="NaN"?"":"alpha(opacity="+value*100+")");}return elem.filter&&elem.filter.indexOf("opacity=")>=0?(parseFloat(elem.filter.match(/opacity=([^)]*)/)[1])/100)+'':"";}name=name.replace(/-([a-z])/ig,function(all,letter){return letter.toUpperCase();});if(set)elem[name]=value;return elem[name];},trim:function(text){return(text||"").replace(/^\s+|\s+$/g,"");},makeArray:function(array){var ret=[];if(array!=null){var i=array.length;if(i==null||array.split||array.setInterval||array.call)ret[0]=array;else +while(i)ret[--i]=array[i];}return ret;},inArray:function(elem,array){for(var i=0,length=array.length;i*",this).remove();while(this.firstChild)this.removeChild(this.firstChild);}},function(name,fn){jQuery.fn[name]=function(){return this.each(fn,arguments);};});jQuery.each(["Height","Width"],function(i,name){var type=name.toLowerCase();jQuery.fn[type]=function(size){return this[0]==window?jQuery.browser.opera&&document.body["client"+name]||jQuery.browser.safari&&window["inner"+name]||document.compatMode=="CSS1Compat"&&document.documentElement["client"+name]||document.body["client"+name]:this[0]==document?Math.max(Math.max(document.body["scroll"+name],document.documentElement["scroll"+name]),Math.max(document.body["offset"+name],document.documentElement["offset"+name])):size==undefined?(this.length?jQuery.css(this[0],type):null):this.css(type,size.constructor==String?size:size+"px");};});function num(elem,prop){return elem[0]&&parseInt(jQuery.curCSS(elem[0],prop,true),10)||0;}var chars=jQuery.browser.safari&&parseInt(jQuery.browser.version)<417?"(?:[\\w*_-]|\\\\.)":"(?:[\\w\u0128-\uFFFF*_-]|\\\\.)",quickChild=new RegExp("^>\\s*("+chars+"+)"),quickID=new RegExp("^("+chars+"+)(#)("+chars+"+)"),quickClass=new RegExp("^([#.]?)("+chars+"*)");jQuery.extend({expr:{"":function(a,i,m){return m[2]=="*"||jQuery.nodeName(a,m[2]);},"#":function(a,i,m){return a.getAttribute("id")==m[2];},":":{lt:function(a,i,m){return 
im[3]-0;},nth:function(a,i,m){return m[3]-0==i;},eq:function(a,i,m){return m[3]-0==i;},first:function(a,i){return i==0;},last:function(a,i,m,r){return i==r.length-1;},even:function(a,i){return i%2==0;},odd:function(a,i){return i%2;},"first-child":function(a){return a.parentNode.getElementsByTagName("*")[0]==a;},"last-child":function(a){return jQuery.nth(a.parentNode.lastChild,1,"previousSibling")==a;},"only-child":function(a){return!jQuery.nth(a.parentNode.lastChild,2,"previousSibling");},parent:function(a){return a.firstChild;},empty:function(a){return!a.firstChild;},contains:function(a,i,m){return(a.textContent||a.innerText||jQuery(a).text()||"").indexOf(m[3])>=0;},visible:function(a){return"hidden"!=a.type&&jQuery.css(a,"display")!="none"&&jQuery.css(a,"visibility")!="hidden";},hidden:function(a){return"hidden"==a.type||jQuery.css(a,"display")=="none"||jQuery.css(a,"visibility")=="hidden";},enabled:function(a){return!a.disabled;},disabled:function(a){return a.disabled;},checked:function(a){return a.checked;},selected:function(a){return a.selected||jQuery.attr(a,"selected");},text:function(a){return"text"==a.type;},radio:function(a){return"radio"==a.type;},checkbox:function(a){return"checkbox"==a.type;},file:function(a){return"file"==a.type;},password:function(a){return"password"==a.type;},submit:function(a){return"submit"==a.type;},image:function(a){return"image"==a.type;},reset:function(a){return"reset"==a.type;},button:function(a){return"button"==a.type||jQuery.nodeName(a,"button");},input:function(a){return/input|select|textarea|button/i.test(a.nodeName);},has:function(a,i,m){return jQuery.find(m[3],a).length;},header:function(a){return/h\d/i.test(a.nodeName);},animated:function(a){return jQuery.grep(jQuery.timers,function(fn){return a==fn.elem;}).length;}}},parse:[/^(\[) *@?([\w-]+) *([!*$^~=]*) *('?"?)(.*?)\4 *\]/,/^(:)([\w-]+)\("?'?(.*?(\(.*?\))?[^(]*?)"?'?\)/,new RegExp("^([:.#]*)("+chars+"+)")],multiFilter:function(expr,elems,not){var old,cur=[];while(expr&&expr!=old){old=expr;var f=jQuery.filter(expr,elems,not);expr=f.t.replace(/^\s*,\s*/,"");cur=not?elems=f.r:jQuery.merge(cur,f.r);}return cur;},find:function(t,context){if(typeof t!="string")return[t];if(context&&context.nodeType!=1&&context.nodeType!=9)return[];context=context||document;var ret=[context],done=[],last,nodeName;while(t&&last!=t){var r=[];last=t;t=jQuery.trim(t);var foundToken=false,re=quickChild,m=re.exec(t);if(m){nodeName=m[1].toUpperCase();for(var i=0;ret[i];i++)for(var c=ret[i].firstChild;c;c=c.nextSibling)if(c.nodeType==1&&(nodeName=="*"||c.nodeName.toUpperCase()==nodeName))r.push(c);ret=r;t=t.replace(re,"");if(t.indexOf(" ")==0)continue;foundToken=true;}else{re=/^([>+~])\s*(\w*)/i;if((m=re.exec(t))!=null){r=[];var merge={};nodeName=m[2].toUpperCase();m=m[1];for(var j=0,rl=ret.length;j=0;if(!not&&pass||not&&!pass)tmp.push(r[i]);}return tmp;},filter:function(t,r,not){var last;while(t&&t!=last){last=t;var p=jQuery.parse,m;for(var i=0;p[i];i++){m=p[i].exec(t);if(m){t=t.substring(m[0].length);m[2]=m[2].replace(/\\/g,"");break;}}if(!m)break;if(m[1]==":"&&m[2]=="not")r=isSimple.test(m[3])?jQuery.filter(m[3],r,true).r:jQuery(r).not(m[3]);else if(m[1]==".")r=jQuery.classFilter(r,m[2],not);else if(m[1]=="["){var tmp=[],type=m[3];for(var i=0,rl=r.length;i=0)^not)tmp.push(a);}r=tmp;}else if(m[1]==":"&&m[2]=="nth-child"){var merge={},tmp=[],test=/(-?)(\d*)n((?:\+|-)?\d*)/.exec(m[3]=="even"&&"2n"||m[3]=="odd"&&"2n+1"||!/\D/.test(m[3])&&"0n+"+m[3]||m[3]),first=(test[1]+(test[2]||1))-0,last=test[3]-0;for(var 
i=0,rl=r.length;i=0)add=true;if(add^not)tmp.push(node);}r=tmp;}else{var fn=jQuery.expr[m[1]];if(typeof fn=="object")fn=fn[m[2]];if(typeof fn=="string")fn=eval("false||function(a,i){return "+fn+";}");r=jQuery.grep(r,function(elem,i){return fn(elem,i,m,r);},not);}}return{r:r,t:t};},dir:function(elem,dir){var matched=[],cur=elem[dir];while(cur&&cur!=document){if(cur.nodeType==1)matched.push(cur);cur=cur[dir];}return matched;},nth:function(cur,result,dir,elem){result=result||1;var num=0;for(;cur;cur=cur[dir])if(cur.nodeType==1&&++num==result)break;return cur;},sibling:function(n,elem){var r=[];for(;n;n=n.nextSibling){if(n.nodeType==1&&n!=elem)r.push(n);}return r;}});jQuery.event={add:function(elem,types,handler,data){if(elem.nodeType==3||elem.nodeType==8)return;if(jQuery.browser.msie&&elem.setInterval)elem=window;if(!handler.guid)handler.guid=this.guid++;if(data!=undefined){var fn=handler;handler=this.proxy(fn,function(){return fn.apply(this,arguments);});handler.data=data;}var events=jQuery.data(elem,"events")||jQuery.data(elem,"events",{}),handle=jQuery.data(elem,"handle")||jQuery.data(elem,"handle",function(){if(typeof jQuery!="undefined"&&!jQuery.event.triggered)return jQuery.event.handle.apply(arguments.callee.elem,arguments);});handle.elem=elem;jQuery.each(types.split(/\s+/),function(index,type){var parts=type.split(".");type=parts[0];handler.type=parts[1];var handlers=events[type];if(!handlers){handlers=events[type]={};if(!jQuery.event.special[type]||jQuery.event.special[type].setup.call(elem)===false){if(elem.addEventListener)elem.addEventListener(type,handle,false);else if(elem.attachEvent)elem.attachEvent("on"+type,handle);}}handlers[handler.guid]=handler;jQuery.event.global[type]=true;});elem=null;},guid:1,global:{},remove:function(elem,types,handler){if(elem.nodeType==3||elem.nodeType==8)return;var events=jQuery.data(elem,"events"),ret,index;if(events){if(types==undefined||(typeof types=="string"&&types.charAt(0)=="."))for(var type in events)this.remove(elem,type+(types||""));else{if(types.type){handler=types.handler;types=types.type;}jQuery.each(types.split(/\s+/),function(index,type){var parts=type.split(".");type=parts[0];if(events[type]){if(handler)delete events[type][handler.guid];else +for(handler in events[type])if(!parts[1]||events[type][handler].type==parts[1])delete events[type][handler];for(ret in events[type])break;if(!ret){if(!jQuery.event.special[type]||jQuery.event.special[type].teardown.call(elem)===false){if(elem.removeEventListener)elem.removeEventListener(type,jQuery.data(elem,"handle"),false);else if(elem.detachEvent)elem.detachEvent("on"+type,jQuery.data(elem,"handle"));}ret=null;delete events[type];}}});}for(ret in events)break;if(!ret){var handle=jQuery.data(elem,"handle");if(handle)handle.elem=null;jQuery.removeData(elem,"events");jQuery.removeData(elem,"handle");}}},trigger:function(type,data,elem,donative,extra){data=jQuery.makeArray(data);if(type.indexOf("!")>=0){type=type.slice(0,-1);var exclusive=true;}if(!elem){if(this.global[type])jQuery("*").add([window,document]).trigger(type,data);}else{if(elem.nodeType==3||elem.nodeType==8)return undefined;var val,ret,fn=jQuery.isFunction(elem[type]||null),event=!data[0]||!data[0].preventDefault;if(event){data.unshift({type:type,target:elem,preventDefault:function(){},stopPropagation:function(){},timeStamp:now()});data[0][expando]=true;}data[0].type=type;if(exclusive)data[0].exclusive=true;var 
handle=jQuery.data(elem,"handle");if(handle)val=handle.apply(elem,data);if((!fn||(jQuery.nodeName(elem,'a')&&type=="click"))&&elem["on"+type]&&elem["on"+type].apply(elem,data)===false)val=false;if(event)data.shift();if(extra&&jQuery.isFunction(extra)){ret=extra.apply(elem,val==null?data:data.concat(val));if(ret!==undefined)val=ret;}if(fn&&donative!==false&&val!==false&&!(jQuery.nodeName(elem,'a')&&type=="click")){this.triggered=true;try{elem[type]();}catch(e){}}this.triggered=false;}return val;},handle:function(event){var val,ret,namespace,all,handlers;event=arguments[0]=jQuery.event.fix(event||window.event);namespace=event.type.split(".");event.type=namespace[0];namespace=namespace[1];all=!namespace&&!event.exclusive;handlers=(jQuery.data(this,"events")||{})[event.type];for(var j in handlers){var handler=handlers[j];if(all||handler.type==namespace){event.handler=handler;event.data=handler.data;ret=handler.apply(this,arguments);if(val!==false)val=ret;if(ret===false){event.preventDefault();event.stopPropagation();}}}return val;},fix:function(event){if(event[expando]==true)return event;var originalEvent=event;event={originalEvent:originalEvent};var props="altKey attrChange attrName bubbles button cancelable charCode clientX clientY ctrlKey currentTarget data detail eventPhase fromElement handler keyCode metaKey newValue originalTarget pageX pageY prevValue relatedNode relatedTarget screenX screenY shiftKey srcElement target timeStamp toElement type view wheelDelta which".split(" ");for(var i=props.length;i;i--)event[props[i]]=originalEvent[props[i]];event[expando]=true;event.preventDefault=function(){if(originalEvent.preventDefault)originalEvent.preventDefault();originalEvent.returnValue=false;};event.stopPropagation=function(){if(originalEvent.stopPropagation)originalEvent.stopPropagation();originalEvent.cancelBubble=true;};event.timeStamp=event.timeStamp||now();if(!event.target)event.target=event.srcElement||document;if(event.target.nodeType==3)event.target=event.target.parentNode;if(!event.relatedTarget&&event.fromElement)event.relatedTarget=event.fromElement==event.target?event.toElement:event.fromElement;if(event.pageX==null&&event.clientX!=null){var doc=document.documentElement,body=document.body;event.pageX=event.clientX+(doc&&doc.scrollLeft||body&&body.scrollLeft||0)-(doc.clientLeft||0);event.pageY=event.clientY+(doc&&doc.scrollTop||body&&body.scrollTop||0)-(doc.clientTop||0);}if(!event.which&&((event.charCode||event.charCode===0)?event.charCode:event.keyCode))event.which=event.charCode||event.keyCode;if(!event.metaKey&&event.ctrlKey)event.metaKey=event.ctrlKey;if(!event.which&&event.button)event.which=(event.button&1?1:(event.button&2?3:(event.button&4?2:0)));return event;},proxy:function(fn,proxy){proxy.guid=fn.guid=fn.guid||proxy.guid||this.guid++;return proxy;},special:{ready:{setup:function(){bindReady();return;},teardown:function(){return;}},mouseenter:{setup:function(){if(jQuery.browser.msie)return false;jQuery(this).bind("mouseover",jQuery.event.special.mouseenter.handler);return true;},teardown:function(){if(jQuery.browser.msie)return false;jQuery(this).unbind("mouseover",jQuery.event.special.mouseenter.handler);return true;},handler:function(event){if(withinElement(event,this))return true;event.type="mouseenter";return jQuery.event.handle.apply(this,arguments);}},mouseleave:{setup:function(){if(jQuery.browser.msie)return false;jQuery(this).bind("mouseout",jQuery.event.special.mouseleave.handler);return true;},teardown:function(){if(jQuery.browser.msie)return 
false;jQuery(this).unbind("mouseout",jQuery.event.special.mouseleave.handler);return true;},handler:function(event){if(withinElement(event,this))return true;event.type="mouseleave";return jQuery.event.handle.apply(this,arguments);}}}};jQuery.fn.extend({bind:function(type,data,fn){return type=="unload"?this.one(type,data,fn):this.each(function(){jQuery.event.add(this,type,fn||data,fn&&data);});},one:function(type,data,fn){var one=jQuery.event.proxy(fn||data,function(event){jQuery(this).unbind(event,one);return(fn||data).apply(this,arguments);});return this.each(function(){jQuery.event.add(this,type,one,fn&&data);});},unbind:function(type,fn){return this.each(function(){jQuery.event.remove(this,type,fn);});},trigger:function(type,data,fn){return this.each(function(){jQuery.event.trigger(type,data,this,true,fn);});},triggerHandler:function(type,data,fn){return this[0]&&jQuery.event.trigger(type,data,this[0],false,fn);},toggle:function(fn){var args=arguments,i=1;while(i=0){var selector=url.slice(off,url.length);url=url.slice(0,off);}callback=callback||function(){};var type="GET";if(params)if(jQuery.isFunction(params)){callback=params;params=null;}else{params=jQuery.param(params);type="POST";}var self=this;jQuery.ajax({url:url,type:type,dataType:"html",data:params,complete:function(res,status){if(status=="success"||status=="notmodified")self.html(selector?jQuery("
").append(res.responseText.replace(//g,"")).find(selector):res.responseText);self.each(callback,[res.responseText,status,res]);}});return this;},serialize:function(){return jQuery.param(this.serializeArray());},serializeArray:function(){return this.map(function(){return jQuery.nodeName(this,"form")?jQuery.makeArray(this.elements):this;}).filter(function(){return this.name&&!this.disabled&&(this.checked||/select|textarea/i.test(this.nodeName)||/text|hidden|password/i.test(this.type));}).map(function(i,elem){var val=jQuery(this).val();return val==null?null:val.constructor==Array?jQuery.map(val,function(val,i){return{name:elem.name,value:val};}):{name:elem.name,value:val};}).get();}});jQuery.each("ajaxStart,ajaxStop,ajaxComplete,ajaxError,ajaxSuccess,ajaxSend".split(","),function(i,o){jQuery.fn[o]=function(f){return this.bind(o,f);};});var jsc=now();jQuery.extend({get:function(url,data,callback,type){if(jQuery.isFunction(data)){callback=data;data=null;}return jQuery.ajax({type:"GET",url:url,data:data,success:callback,dataType:type});},getScript:function(url,callback){return jQuery.get(url,null,callback,"script");},getJSON:function(url,data,callback){return jQuery.get(url,data,callback,"json");},post:function(url,data,callback,type){if(jQuery.isFunction(data)){callback=data;data={};}return jQuery.ajax({type:"POST",url:url,data:data,success:callback,dataType:type});},ajaxSetup:function(settings){jQuery.extend(jQuery.ajaxSettings,settings);},ajaxSettings:{url:location.href,global:true,type:"GET",timeout:0,contentType:"application/x-www-form-urlencoded",processData:true,async:true,data:null,username:null,password:null,accepts:{xml:"application/xml, text/xml",html:"text/html",script:"text/javascript, application/javascript",json:"application/json, text/javascript",text:"text/plain",_default:"*/*"}},lastModified:{},ajax:function(s){s=jQuery.extend(true,s,jQuery.extend(true,{},jQuery.ajaxSettings,s));var jsonp,jsre=/=\?(&|$)/g,status,data,type=s.type.toUpperCase();if(s.data&&s.processData&&typeof s.data!="string")s.data=jQuery.param(s.data);if(s.dataType=="jsonp"){if(type=="GET"){if(!s.url.match(jsre))s.url+=(s.url.match(/\?/)?"&":"?")+(s.jsonp||"callback")+"=?";}else if(!s.data||!s.data.match(jsre))s.data=(s.data?s.data+"&":"")+(s.jsonp||"callback")+"=?";s.dataType="json";}if(s.dataType=="json"&&(s.data&&s.data.match(jsre)||s.url.match(jsre))){jsonp="jsonp"+jsc++;if(s.data)s.data=(s.data+"").replace(jsre,"="+jsonp+"$1");s.url=s.url.replace(jsre,"="+jsonp+"$1");s.dataType="script";window[jsonp]=function(tmp){data=tmp;success();complete();window[jsonp]=undefined;try{delete window[jsonp];}catch(e){}if(head)head.removeChild(script);};}if(s.dataType=="script"&&s.cache==null)s.cache=false;if(s.cache===false&&type=="GET"){var ts=now();var ret=s.url.replace(/(\?|&)_=.*?(&|$)/,"$1_="+ts+"$2");s.url=ret+((ret==s.url)?(s.url.match(/\?/)?"&":"?")+"_="+ts:"");}if(s.data&&type=="GET"){s.url+=(s.url.match(/\?/)?"&":"?")+s.data;s.data=null;}if(s.global&&!jQuery.active++)jQuery.event.trigger("ajaxStart");var remote=/^(?:\w+:)?\/\/([^\/?#]+)/;if(s.dataType=="script"&&type=="GET"&&remote.test(s.url)&&remote.exec(s.url)[1]!=location.host){var head=document.getElementsByTagName("head")[0];var script=document.createElement("script");script.src=s.url;if(s.scriptCharset)script.charset=s.scriptCharset;if(!jsonp){var 
done=false;script.onload=script.onreadystatechange=function(){if(!done&&(!this.readyState||this.readyState=="loaded"||this.readyState=="complete")){done=true;success();complete();head.removeChild(script);}};}head.appendChild(script);return undefined;}var requestDone=false;var xhr=window.ActiveXObject?new ActiveXObject("Microsoft.XMLHTTP"):new XMLHttpRequest();if(s.username)xhr.open(type,s.url,s.async,s.username,s.password);else +xhr.open(type,s.url,s.async);try{if(s.data)xhr.setRequestHeader("Content-Type",s.contentType);if(s.ifModified)xhr.setRequestHeader("If-Modified-Since",jQuery.lastModified[s.url]||"Thu, 01 Jan 1970 00:00:00 GMT");xhr.setRequestHeader("X-Requested-With","XMLHttpRequest");xhr.setRequestHeader("Accept",s.dataType&&s.accepts[s.dataType]?s.accepts[s.dataType]+", */*":s.accepts._default);}catch(e){}if(s.beforeSend&&s.beforeSend(xhr,s)===false){s.global&&jQuery.active--;xhr.abort();return false;}if(s.global)jQuery.event.trigger("ajaxSend",[xhr,s]);var onreadystatechange=function(isTimeout){if(!requestDone&&xhr&&(xhr.readyState==4||isTimeout=="timeout")){requestDone=true;if(ival){clearInterval(ival);ival=null;}status=isTimeout=="timeout"&&"timeout"||!jQuery.httpSuccess(xhr)&&"error"||s.ifModified&&jQuery.httpNotModified(xhr,s.url)&&"notmodified"||"success";if(status=="success"){try{data=jQuery.httpData(xhr,s.dataType,s.dataFilter);}catch(e){status="parsererror";}}if(status=="success"){var modRes;try{modRes=xhr.getResponseHeader("Last-Modified");}catch(e){}if(s.ifModified&&modRes)jQuery.lastModified[s.url]=modRes;if(!jsonp)success();}else +jQuery.handleError(s,xhr,status);complete();if(s.async)xhr=null;}};if(s.async){var ival=setInterval(onreadystatechange,13);if(s.timeout>0)setTimeout(function(){if(xhr){xhr.abort();if(!requestDone)onreadystatechange("timeout");}},s.timeout);}try{xhr.send(s.data);}catch(e){jQuery.handleError(s,xhr,null,e);}if(!s.async)onreadystatechange();function success(){if(s.success)s.success(data,status);if(s.global)jQuery.event.trigger("ajaxSuccess",[xhr,s]);}function complete(){if(s.complete)s.complete(xhr,status);if(s.global)jQuery.event.trigger("ajaxComplete",[xhr,s]);if(s.global&&!--jQuery.active)jQuery.event.trigger("ajaxStop");}return xhr;},handleError:function(s,xhr,status,e){if(s.error)s.error(xhr,status,e);if(s.global)jQuery.event.trigger("ajaxError",[xhr,s,e]);},active:0,httpSuccess:function(xhr){try{return!xhr.status&&location.protocol=="file:"||(xhr.status>=200&&xhr.status<300)||xhr.status==304||xhr.status==1223||jQuery.browser.safari&&xhr.status==undefined;}catch(e){}return false;},httpNotModified:function(xhr,url){try{var xhrRes=xhr.getResponseHeader("Last-Modified");return xhr.status==304||xhrRes==jQuery.lastModified[url]||jQuery.browser.safari&&xhr.status==undefined;}catch(e){}return false;},httpData:function(xhr,type,filter){var ct=xhr.getResponseHeader("content-type"),xml=type=="xml"||!type&&ct&&ct.indexOf("xml")>=0,data=xml?xhr.responseXML:xhr.responseText;if(xml&&data.documentElement.tagName=="parsererror")throw"parsererror";if(filter)data=filter(data,type);if(type=="script")jQuery.globalEval(data);if(type=="json")data=eval("("+data+")");return data;},param:function(a){var s=[];if(a.constructor==Array||a.jquery)jQuery.each(a,function(){s.push(encodeURIComponent(this.name)+"="+encodeURIComponent(this.value));});else +for(var j in a)if(a[j]&&a[j].constructor==Array)jQuery.each(a[j],function(){s.push(encodeURIComponent(j)+"="+encodeURIComponent(this));});else 
+s.push(encodeURIComponent(j)+"="+encodeURIComponent(jQuery.isFunction(a[j])?a[j]():a[j]));return s.join("&").replace(/%20/g,"+");}});jQuery.fn.extend({show:function(speed,callback){return speed?this.animate({height:"show",width:"show",opacity:"show"},speed,callback):this.filter(":hidden").each(function(){this.style.display=this.oldblock||"";if(jQuery.css(this,"display")=="none"){var elem=jQuery("<"+this.tagName+" />").appendTo("body");this.style.display=elem.css("display");if(this.style.display=="none")this.style.display="block";elem.remove();}}).end();},hide:function(speed,callback){return speed?this.animate({height:"hide",width:"hide",opacity:"hide"},speed,callback):this.filter(":visible").each(function(){this.oldblock=this.oldblock||jQuery.css(this,"display");this.style.display="none";}).end();},_toggle:jQuery.fn.toggle,toggle:function(fn,fn2){return jQuery.isFunction(fn)&&jQuery.isFunction(fn2)?this._toggle.apply(this,arguments):fn?this.animate({height:"toggle",width:"toggle",opacity:"toggle"},fn,fn2):this.each(function(){jQuery(this)[jQuery(this).is(":hidden")?"show":"hide"]();});},slideDown:function(speed,callback){return this.animate({height:"show"},speed,callback);},slideUp:function(speed,callback){return this.animate({height:"hide"},speed,callback);},slideToggle:function(speed,callback){return this.animate({height:"toggle"},speed,callback);},fadeIn:function(speed,callback){return this.animate({opacity:"show"},speed,callback);},fadeOut:function(speed,callback){return this.animate({opacity:"hide"},speed,callback);},fadeTo:function(speed,to,callback){return this.animate({opacity:to},speed,callback);},animate:function(prop,speed,easing,callback){var optall=jQuery.speed(speed,easing,callback);return this[optall.queue===false?"each":"queue"](function(){if(this.nodeType!=1)return false;var opt=jQuery.extend({},optall),p,hidden=jQuery(this).is(":hidden"),self=this;for(p in prop){if(prop[p]=="hide"&&hidden||prop[p]=="show"&&!hidden)return opt.complete.call(this);if(p=="height"||p=="width"){opt.display=jQuery.css(this,"display");opt.overflow=this.style.overflow;}}if(opt.overflow!=null)this.style.overflow="hidden";opt.curAnim=jQuery.extend({},prop);jQuery.each(prop,function(name,val){var e=new jQuery.fx(self,opt,name);if(/toggle|show|hide/.test(val))e[val=="toggle"?hidden?"show":"hide":val](prop);else{var parts=val.toString().match(/^([+-]=)?([\d+-.]+)(.*)$/),start=e.cur(true)||0;if(parts){var end=parseFloat(parts[2]),unit=parts[3]||"px";if(unit!="px"){self.style[name]=(end||1)+unit;start=((end||1)/e.cur(true))*start;self.style[name]=start+unit;}if(parts[1])end=((parts[1]=="-="?-1:1)*end)+start;e.custom(start,end,unit);}else +e.custom(start,val,"");}});return true;});},queue:function(type,fn){if(jQuery.isFunction(type)||(type&&type.constructor==Array)){fn=type;type="fx";}if(!type||(typeof type=="string"&&!fn))return queue(this[0],type);return this.each(function(){if(fn.constructor==Array)queue(this,type,fn);else{queue(this,type).push(fn);if(queue(this,type).length==1)fn.call(this);}});},stop:function(clearQueue,gotoEnd){var timers=jQuery.timers;if(clearQueue)this.queue([]);this.each(function(){for(var i=timers.length-1;i>=0;i--)if(timers[i].elem==this){if(gotoEnd)timers[i](true);timers.splice(i,1);}});if(!gotoEnd)this.dequeue();return this;}});var queue=function(elem,type,array){if(elem){type=type||"fx";var q=jQuery.data(elem,type+"queue");if(!q||array)q=jQuery.data(elem,type+"queue",jQuery.makeArray(array));}return q;};jQuery.fn.dequeue=function(type){type=type||"fx";return 
this.each(function(){var q=queue(this,type);q.shift();if(q.length)q[0].call(this);});};jQuery.extend({speed:function(speed,easing,fn){var opt=speed&&speed.constructor==Object?speed:{complete:fn||!fn&&easing||jQuery.isFunction(speed)&&speed,duration:speed,easing:fn&&easing||easing&&easing.constructor!=Function&&easing};opt.duration=(opt.duration&&opt.duration.constructor==Number?opt.duration:jQuery.fx.speeds[opt.duration])||jQuery.fx.speeds.def;opt.old=opt.complete;opt.complete=function(){if(opt.queue!==false)jQuery(this).dequeue();if(jQuery.isFunction(opt.old))opt.old.call(this);};return opt;},easing:{linear:function(p,n,firstNum,diff){return firstNum+diff*p;},swing:function(p,n,firstNum,diff){return((-Math.cos(p*Math.PI)/2)+0.5)*diff+firstNum;}},timers:[],timerId:null,fx:function(elem,options,prop){this.options=options;this.elem=elem;this.prop=prop;if(!options.orig)options.orig={};}});jQuery.fx.prototype={update:function(){if(this.options.step)this.options.step.call(this.elem,this.now,this);(jQuery.fx.step[this.prop]||jQuery.fx.step._default)(this);if(this.prop=="height"||this.prop=="width")this.elem.style.display="block";},cur:function(force){if(this.elem[this.prop]!=null&&this.elem.style[this.prop]==null)return this.elem[this.prop];var r=parseFloat(jQuery.css(this.elem,this.prop,force));return r&&r>-10000?r:parseFloat(jQuery.curCSS(this.elem,this.prop))||0;},custom:function(from,to,unit){this.startTime=now();this.start=from;this.end=to;this.unit=unit||this.unit||"px";this.now=this.start;this.pos=this.state=0;this.update();var self=this;function t(gotoEnd){return self.step(gotoEnd);}t.elem=this.elem;jQuery.timers.push(t);if(jQuery.timerId==null){jQuery.timerId=setInterval(function(){var timers=jQuery.timers;for(var i=0;ithis.options.duration+this.startTime){this.now=this.end;this.pos=this.state=1;this.update();this.options.curAnim[this.prop]=true;var done=true;for(var i in this.options.curAnim)if(this.options.curAnim[i]!==true)done=false;if(done){if(this.options.display!=null){this.elem.style.overflow=this.options.overflow;this.elem.style.display=this.options.display;if(jQuery.css(this.elem,"display")=="none")this.elem.style.display="block";}if(this.options.hide)this.elem.style.display="none";if(this.options.hide||this.options.show)for(var p in this.options.curAnim)jQuery.attr(this.elem.style,p,this.options.orig[p]);}if(done)this.options.complete.call(this.elem);return false;}else{var n=t-this.startTime;this.state=n/this.options.duration;this.pos=jQuery.easing[this.options.easing||(jQuery.easing.swing?"swing":"linear")](this.state,n,0,1,this.options.duration);this.now=this.start+((this.end-this.start)*this.pos);this.update();}return true;}};jQuery.extend(jQuery.fx,{speeds:{slow:600,fast:200,def:400},step:{scrollLeft:function(fx){fx.elem.scrollLeft=fx.now;},scrollTop:function(fx){fx.elem.scrollTop=fx.now;},opacity:function(fx){jQuery.attr(fx.elem.style,"opacity",fx.now);},_default:function(fx){fx.elem.style[fx.prop]=fx.now+fx.unit;}}});jQuery.fn.offset=function(){var left=0,top=0,elem=this[0],results;if(elem)with(jQuery.browser){var parent=elem.parentNode,offsetChild=elem,offsetParent=elem.offsetParent,doc=elem.ownerDocument,safari2=safari&&parseInt(version)<522&&!/adobeair/i.test(userAgent),css=jQuery.curCSS,fixed=css(elem,"position")=="fixed";if(elem.getBoundingClientRect){var 
box=elem.getBoundingClientRect();add(box.left+Math.max(doc.documentElement.scrollLeft,doc.body.scrollLeft),box.top+Math.max(doc.documentElement.scrollTop,doc.body.scrollTop));add(-doc.documentElement.clientLeft,-doc.documentElement.clientTop);}else{add(elem.offsetLeft,elem.offsetTop);while(offsetParent){add(offsetParent.offsetLeft,offsetParent.offsetTop);if(mozilla&&!/^t(able|d|h)$/i.test(offsetParent.tagName)||safari&&!safari2)border(offsetParent);if(!fixed&&css(offsetParent,"position")=="fixed")fixed=true;offsetChild=/^body$/i.test(offsetParent.tagName)?offsetChild:offsetParent;offsetParent=offsetParent.offsetParent;}while(parent&&parent.tagName&&!/^body|html$/i.test(parent.tagName)){if(!/^inline|table.*$/i.test(css(parent,"display")))add(-parent.scrollLeft,-parent.scrollTop);if(mozilla&&css(parent,"overflow")!="visible")border(parent);parent=parent.parentNode;}if((safari2&&(fixed||css(offsetChild,"position")=="absolute"))||(mozilla&&css(offsetChild,"position")!="absolute"))add(-doc.body.offsetLeft,-doc.body.offsetTop);if(fixed)add(Math.max(doc.documentElement.scrollLeft,doc.body.scrollLeft),Math.max(doc.documentElement.scrollTop,doc.body.scrollTop));}results={top:top,left:left};}function border(elem){add(jQuery.curCSS(elem,"borderLeftWidth",true),jQuery.curCSS(elem,"borderTopWidth",true));}function add(l,t){left+=parseInt(l,10)||0;top+=parseInt(t,10)||0;}return results;};jQuery.fn.extend({position:function(){var left=0,top=0,results;if(this[0]){var offsetParent=this.offsetParent(),offset=this.offset(),parentOffset=/^body|html$/i.test(offsetParent[0].tagName)?{top:0,left:0}:offsetParent.offset();offset.top-=num(this,'marginTop');offset.left-=num(this,'marginLeft');parentOffset.top+=num(offsetParent,'borderTopWidth');parentOffset.left+=num(offsetParent,'borderLeftWidth');results={top:offset.top-parentOffset.top,left:offset.left-parentOffset.left};}return results;},offsetParent:function(){var offsetParent=this[0].offsetParent;while(offsetParent&&(!/^body|html$/i.test(offsetParent.tagName)&&jQuery.css(offsetParent,'position')=='static'))offsetParent=offsetParent.offsetParent;return jQuery(offsetParent);}});jQuery.each(['Left','Top'],function(i,name){var method='scroll'+name;jQuery.fn[method]=function(val){if(!this[0])return;return val!=undefined?this.each(function(){this==window||this==document?window.scrollTo(!i?val:jQuery(window).scrollLeft(),i?val:jQuery(window).scrollTop()):this[method]=val;}):this[0]==window||this[0]==document?self[i?'pageYOffset':'pageXOffset']||jQuery.boxModel&&document.documentElement[method]||document.body[method]:this[0][method];};});jQuery.each(["Height","Width"],function(i,name){var tl=i?"Left":"Top",br=i?"Right":"Bottom";jQuery.fn["inner"+name]=function(){return this[name.toLowerCase()]()+num(this,"padding"+tl)+num(this,"padding"+br);};jQuery.fn["outer"+name]=function(margin){return this["inner"+name]()+num(this,"border"+tl+"Width")+num(this,"border"+br+"Width")+(margin?num(this,"margin"+tl)+num(this,"margin"+br):0);};});})(); \ No newline at end of file diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/www/layout.css b/DJAGEN/tags/djagen_old/djagen/gezegen/www/layout.css new file mode 100755 index 0000000..f93cc40 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/gezegen/www/layout.css @@ -0,0 +1,115 @@ +/* body */ + +body { + margin: 0px; + background-color: white; + color: black; +} + +/* header */ + +#banner { + background: url('images/hdr-planet.png') no-repeat; + background-color: #9db8d2; + background-position: right; + border-bottom: 1px solid #807d74; 
+ height: 48px;
+}
+
+#logo {
+ position: absolute;
+ top: 25px;
+ left: 15px;
+ background: url('images/logo.png') no-repeat;
+ width: 64px;
+ height: 54px;
+}
+
+#logo img {
+ border: 0px;
+ width: 64px;
+ height: 64px;
+}
+
+#hdrNav {
+ margin-top: 6px;
+ margin-left: 84px;
+ margin-right: 190px;
+ padding-right: 3em;
+ font-size: small;
+}
+
+#hdrNav a {
+ color: #000000;
+}
+
+#body {
+ margin: 0 190px 0 0;
+ padding: 1.5em 3em 0em 1em;
+}
+
+#body *:first-child {
+ margin-top: 0;
+}
+
+#copyright {
+ clear: both;
+ padding-bottom: 1em;
+ text-align: center;
+ font-size: small;
+ color: #aaaaaa;
+}
+
+#copyright a {
+ color: #c0c0c0;
+}
+
+#copyright a:visited {
+ color: #c0c0c0;
+}
+
+
+/* SIDEBAR */
+
+#sidebar {
+ position: absolute;
+ top: 80px;
+ right: 0px;
+ /*width: 210px;*/
+ border-left: 1px solid #ffffff;
+ background-color: #eeeeee;
+}
+
+#sidebar div.section {
+ width: 190px;
+ padding: 1em;
+ border-top: 1px solid #ffffff;
+ border-bottom: 1px solid #d9d9d9;
+}
+
+#sidebar div.section h3 {
+ font-weight: bold;
+ font-size: 110%;
+}
+
+#sidebar *:first-child {
+ margin-top: 0;
+}
+
+#sidebar *:last-child {
+ margin-bottom: 0;
+}
+
+#sidebar div.section ul {
+ padding: 0;
+ list-style-type: none;
+}
+
+#sidebar div.section ul ul {
+ padding-left: 1.5em;
+ list-style-type: square;
+}
+
+#sidebar div.section p {
+ font-size: small;
+}
diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/www/nlayout.css b/DJAGEN/tags/djagen_old/djagen/gezegen/www/nlayout.css new file mode 100755 index 0000000..72be5ec --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/gezegen/www/nlayout.css @@ -0,0 +1,316 @@
+body {
+ margin: 0px;
+ padding: 0px;
+ font-family: sans-serif;
+ background-color: white;
+ color: black;
+}
+
+/* GEZEGEN strip */
+
+#gezegen-sites {
+ list-style: none;
+ background: #2E3436 url(img/sites-bg.png) 0 100% repeat-x;
+ text-align: right;
+ padding: 0 1ex;
+ margin: 0;
+ font-size: 75%;
+}
+
+#gezegen-sites ul {
+ margin: 0;
+ padding: 0;
+}
+
+#gezegen-sites li {
+ display: inline;
+ background: url(img/sites-sp.png) 0 0 no-repeat;
+ padding-top: 10px;
+ padding-bottom: 8px;
+ margin-left: 0px;
+ margin-top: 0px;
+}
+
+#gezegen-sites li a {
+ font-weight: bold;
+ color: #FFFFFF;
+ margin: 0 2ex;
+ text-decoration: none;
+ line-height: 30px;
+}
+
+#gezegen-sites li a:hover {
+ text-decoration: underline;
+}
+
+#gezegen-sites .home {
+ float: left;
+ background: url(img/sites-sp.png) 100% 0 no-repeat;
+ padding-top: 0;
+ padding-bottom: 0;
+}
+
+#gezegen-sites .home a {
+ float: left;
+ margin-left: 0;
+ padding-left: 27px;
+}
+
+/* Site header and masthead */
+
+#header {
+ position: relative;
+ width: 100%;
+ background-color: #729FCF;
+}
+
+#masthead {
+ display: table;
+ /* req for ie */
+ border-top: 1px solid #729FCF;
+}
+
+#site-logo {
+ vertical-align: middle;
+ display: table-cell;
+ float: left;
+ border: 0;
+ padding: 10px;
+ /* req for ie */
+ margin-top: expression((this.parentElement.height - this.height)/2);
+}
+
+#site-title {
+ vertical-align: middle;
+ display: table-cell;
+ /* req for ie */
+ margin-top: expression((this.parentElement.height - this.height)/2);
+}
+
+#site-name {
+ margin: 0;
+}
+
+#site-name a {
+ font-size: xx-large;
+ font-weight: bold;
+ text-decoration: none;
+ color: black;
+}
+
+#site-slogan {
+ font-size: 80%;
+ font-style: italic;
+ margin: 0;
+}
+
+#footer-link {
+ position: absolute;
+ right: 1em;
+ bottom: 1em;
+ margin: 0;
+ font-size: 80%;
+ color: black;
+ text-decoration: none;
+ background: url(img/help-about.png) left
no-repeat;
+ padding-left: 20px;
+}
+#footer-link:hover { text-decoration: underline; }
+
+div.breadcrumb {
+ font-size: 75%;
+}
+
+/* Search form */
+
+#search {
+ position: relative;
+ float: right;
+ top: 1em;
+ right: 1em;
+}
+
+#search input.form-text, #search input[name="q"] {
+ border: 1px solid #888888;
+ padding: 0.5ex;
+ background-position: center !important;
+}
+
+#search input.form-submit, #search input[name="sa"] {
+ background: white url(img/search-icon.gif) no-repeat;
+ padding: 1px 1px 1px 15px;
+ border: 1px solid #888888;
+ display: none;
+}
+
+/* Tabs */
+#site-tabs {
+ position: absolute;
+ right: 0px;
+ bottom: 0px;
+ width: 100%;
+ background: transparent url(img/bar.png) 0 100% repeat-x;
+ margin: 0;
+ padding: 0;
+}
+
+#site-tabs ul {
+ float: right;
+ list-style: none;
+ margin: 0;
+ margin-right: 3ex;
+ font-size: 75%;
+ clear: none;
+}
+
+#site-tabs ul li {
+ float: left;
+ margin: 0;
+ margin-left: 0.2ex;
+}
+
+#site-tabs ul li a:hover {
+ color: #111111;
+}
+
+#site-tabs ul li a {
+ float: left;
+ text-decoration: none;
+ color: #555555;
+ background: #eeeeee;
+ padding: 7px 7px 7px 7px;
+ border-bottom: 2px solid #CCCCCC;
+}
+
+#site-tabs ul li a.active {
+ color: #3566A5;
+ background: white;
+ border-top: 2px solid #5555ff;
+ border-bottom: 2px solid white;
+}
+
+/* Content */
+#content {
+ margin: 0px auto 0px auto;
+ padding: 0px 1em 0px 1em;
+ max-width: 65em;
+}
+
+#content h1.title {
+ margin: 0;
+}
+
+/* Feeds & Footer */
+#feeds {
+ background: #dcdcdc url(img/feeds-bg.png) repeat-x left top;
+ padding: 0.5em 0px 0.5em 0px;
+}
+#feeds h3 {
+ margin: 0px;
+ padding: 0px 3% 0px 3%;
+ font-size: 100%;
+}
+#feeds h3 a {
+ background: transparent url(img/dt-closed.png) no-repeat left top;
+ padding-left: 20px;
+ margin-left: -20px;
+ color: #000;
+ text-decoration: none;
+}
+#feeds h3.open a {
+ background: transparent url(img/dt-open.png) no-repeat left top;
+}
+#feedlist {
+ display: none;
+ margin: 0.5em 1em 0.5em 1em;
+ background-color: #eee;
+ -moz-border-radius: 1em;
+ padding: 1em;
+ column-count: 1;
+ column-gap: 1em;
+ -moz-column-count: 1;
+ -moz-column-gap: 1em;
+ -webkit-column-count: 1;
+ -webkit-column-gap: 1em;
+}
+#feedlist ul {
+ margin: 0px;
+ padding: 0px;
+ list-style-type: none;
+ font-size: 90%;
+}
+#feedlist ul li * {
+ vertical-align: middle;
+}
+#feedlist ul li input {
+ margin: 0.2em;
+}
+#feedlist ul li a {
+ color: #000;
+ text-decoration: none;
+}
+#feedlist ul li a:hover {
+ text-decoration: underline;
+}
+#feedlist ul li a.message {
+ color: #999;
+}
+#feedlist ul li a img {
+ margin: 0px 0.2em;
+ border: 0px;
+}
+
+#footer {
+ background: black url(img/footer-bg.png) repeat-x left top;
+ padding: 1%;
+ font-size: x-small;
+ color: #ccc;
+ overflow: hidden;
+ line-height: 150%;
+}
+
+#footer a {
+ color: #000000;
+ font-weight: bold;
+ text-decoration: none;
+}
+#footer a:hover {
+ text-decoration: underline;
+}
+
+#footer .column {
+ float: left;
+ width: 20%;
+ margin-right: 3%;
+}
+
+#footer .section {
+ margin-bottom: 1em;
+}
+
+#footer .section h3 {
+ margin: 0;
+ font-size: 140%;
+}
+
+#footer .section a img {
+ border: 1px solid #cccccc;
+}
+
+#footer .section ul {
+ list-style: none;
+ margin-left: 0;
+ padding-left: 0;
+}
+
+#fineprint {
+ display: inline;
+ float: right;
+ text-align: right;
+ width: 25%;
+}
+
+#ownership {
+ margin-top: 2em;
+ font-size: 90%;
+}
diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/www/opml.xml b/DJAGEN/tags/djagen_old/djagen/gezegen/www/opml.xml new file mode 100755 index
0000000..04007df --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/gezegen/www/opml.xml @@ -0,0 +1,15 @@
+
+
+
+ Linux Gezegeni
+ Mon, 10 May 2010 18:17:43 +0000
+ Mon, 10 May 2010 18:17:43 +0000
+ Gezegen Ekibi
+ gezegen@linux.org.tr
+
+
+
+
+
+
+
diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/www/planet.css b/DJAGEN/tags/djagen_old/djagen/gezegen/www/planet.css new file mode 100755 index 0000000..16af408 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/gezegen/www/planet.css @@ -0,0 +1,134 @@
+div.entry {
+ clear: both;
+ margin-bottom: 2em;
+ margin-right: 1em;
+}
+
+.post-contents img { padding: 10px; }
+
+div.person-info {
+ float: left;
+ position: relative;
+ left: 4px;
+ margin-top: 25px!important;
+ padding: 0 20px 30px 0;
+ width: 120px;
+ background: url(images/bubble/bubble-nipple.png) top right no-repeat;
+ text-align: center;
+}
+
+div.person-info a {
+ text-decoration: none;
+ color: #666;
+}
+
+div.post {
+ background: #fff url(images/bubble/bubble-left.png) left repeat-y;
+ margin-left: 140px;
+}
+
+div.post2 {
+ background: url(images/bubble/bubble-right.png) right repeat-y;
+}
+
+div.post-contents {
+ padding: 0 25px 0 25px;
+ margin-right: 10px;
+}
+
+div.post-contents p {
+ line-height: 140%;
+ margin-top: 1em!important;
+}
+
+div.post-contents blockquote {
+ color: #666;
+ line-height: 150%;
+}
+
+div.post-contents:after {
+ content: "";
+ display: block;
+ clear: both;
+}
+
+h4.post-title, div.post-title {
+ background: url(images/bubble/bubble-top-left.png) top left no-repeat;
+ margin: 1em 0 0 0;
+}
+
+h4.post-title a, div.post-title span {
+ display: block;
+ background: url(images/bubble/bubble-top-right.png) top right no-repeat;
+ padding: 22px 25px 0 25px;
+ font-weight: normal;
+ font-size: 140%;
+ text-decoration: none;
+}
+
+h4.post-title a:hover {
+ text-decoration: underline;
+}
+
+div.post-title span {
+ display: block;
+ height: 20px;
+ font-size: 90%;
+}
+
+div.post-title {
+ display: block;
+}
+
+div.post-header {
+ background: url(images/bubble/bubble-top.png) top repeat-x;
+}
+
+
+div.post-footer {
+ background: url(images/bubble/bubble-bottom.png) bottom repeat-x;
+}
+
+div.post-footer p {
+ background: url(images/bubble/bubble-bottom-left.png) bottom left no-repeat;
+ margin: 0;
+}
+
+div.post-footer p a {
+ display: block;
+ background: url(images/bubble/bubble-bottom-right.png) bottom right no-repeat;
+ padding: 15px 20px 20px 25px;
+ text-align: right;
+ font-size: 85%;
+ color: #999;
+ text-decoration: none;
+}
+
+div.post-footer p a:hover {
+ color: inherit;
+ text-decoration: underline;
+}
+
+h2.date {
+ color: #666;
+ font-weight: normal;
+ font-size: 130%;
+ padding-left: 9px;
+}
+
+#sidebar ul li {
+ font-size: small;
+}
+
+#sidebar ul li a {
+ text-decoration: none;
+}
+
+#sidebar ul li a:hover {
+ text-decoration: underline;
+}
+
+#sidebar .message {
+ cursor: help;
+ color: #666;
+}
diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/www/rss10.xml b/DJAGEN/tags/djagen_old/djagen/gezegen/www/rss10.xml new file mode 100755 index 0000000..ba355e4 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/gezegen/www/rss10.xml @@ -0,0 +1,223 @@
+
+
+
+ Linux Gezegeni
+ http://gezegen.linux.org.tr
+ Linux Gezegeni - http://gezegen.linux.org.tr
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Oğuz Yarımtepe: Seminar Winds at Near East
+ http://feedproxy.google.com/~r/oguzy-gezegen/~3/dmDtp8fRToI/
+ <p>Since last week we have been holding Linux and Free Software seminars at Near East University. Our first seminar was What is Linux?. It was one of the most entertaining What is Linux? talks I have listened to in a long time. Although it lasted three and a half hours, one could say time flowed like water thanks to the entertaining delivery of the speaker, Ali Erdinç Köroğlu.</p>
+<p>Back when I first arrived at Near East, I had also given a What is Linux seminar. The turnout then was as you see below.</p>
+<p><a href="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0697.jpg"><img class="alignnone size-medium wp-image-99" title="What is Linux seminar" src="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0697-300x199.jpg" alt="" width="300" height="199" /></a></p>
+<p>The audience, which should have been made up mostly of teachers, actually consisted of students. In a 1.5-hour talk I explained What is Linux to the participants. Not a single question came during the seminar. So when people came up to ask questions afterwards, I was quite pleased.</p>
+<p><a href="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0704.jpg"><img class="alignnone size-medium wp-image-100" title="What is Linux seminar" src="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0704-300x199.jpg" alt="" width="300" height="199" /></a></p>
+<p>This time, besides the number of participants being far from negligible, I think a more engaged crowd is attending.</p>
+<p><a href="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0757.jpg"><img class="alignnone size-medium wp-image-101" title="YDU AEK Building Blocks of the Internet seminar" src="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0757-300x199.jpg" alt="" width="300" height="199" /></a></p>
+<p>It should also be said that Ali Erdinç has quite an entertaining style, prodding the audience now and then to hold their attention.</p>
+<p><a href="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0759.jpg"><img class="alignnone size-medium wp-image-102" title="Building Blocks of the Internet" src="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0759-300x199.jpg" alt="" width="300" height="199" /></a></p>
+<p>This seminar series will continue until May. For those interested, it can be followed via the university <a href="http://duyuru.neu.edu.tr">announcements page</a>, <a href="http://www.facebook.com/NearEastUniversity">Facebook</a> and <a href="http://twitter.com/NearEastUniv">Twitter</a>. And for those in Cyprus, let us add that we hold film screenings at the university every Friday evening and on Saturdays. We have fun watching them. Do come by.</p>
+<p>One could say spring has arrived in Nicosia. Oğuz Yarımtepe reported from Cyprus.</p>
+<p><a href="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0770.jpg"><img class="alignnone size-medium wp-image-103" title="Innovation Center, the field" src="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0770-300x199.jpg" alt="" width="300" height="199" /></a></p>
+<img src="http://feeds.feedburner.com/~r/oguzy-gezegen/~4/dmDtp8fRToI" height="1" width="1" />
+ Tue, 16 Mar 2010 17:40:18 +0000
+
+
+ Oğuz Yarımtepe: 100 ml
+ http://feedproxy.google.com/~r/oguzy-gezegen/~3/nubepmpaYEk/
+ <p>A liquid restriction has been in force on flights since 1 January 2010. Liquids, gels, creams and the like exceeding 100 ml in carry-on baggage are confiscated. It became something I looked into after an incident happened to me while returning to Cyprus at New Year&#8217;s. I had a 150 ml Dove cream in my backpack. I had been using the cream for quite a while. Around 5:30 we were waiting at the final checkpoint to board the plane. When the security staff said come through for the check, I put my bag on the machine and walked through. The official at the machine called out to me: there is a liquid in this bag. In the front pocket, he said. Yes, there is a cream, I said. He opened it. Looked: a 150 ml cream. We cannot let more than 100 ml through, he said. Half asleep, I said it was a used container, what was inside was probably under 100 ml. The official smiled. He was only doing his job too. The woman standing by took the container sternly, looked at it, and said: for us what matters is its volume. Frankly, there was no point in arguing. They were simply applying a rule. As told, I threw the cream I had paid good money for into the bin with my own hand.</p>
+<p>Now for the parts of the incident that strike me as odd</p>
+<p>* The security official decided how many ml the container held by looking at its label. So next time, purely to test the system, if I buy the same cream and change the 150 on it to 100, it will probably pass.</p>
+<p>* The official did not even open it and look inside. He asked me what was in it. So even if I had put in some of that swine flu that has long since vanished, he would have had no idea.</p>
+<p>* I threw it into the bin with my own hand; that stung.</p>
+<p>So I sent an e-mail to the Ministry of Transport. My e-mail was forwarded to the Directorate General of Civil Aviation, and a reply came. In short, this 100 ml practice entered the civil aviation agenda after the terrorist plot uncovered in England on 10 August 2006. With regulation 1546/2006, issued on 6 November 2006, it began to be applied in all EU member states as well as in Switzerland, Iceland and Norway, the USA and Canada. Since Turkey is an ECAC member state, it followed the recommendation and started applying the practice under the same rules as the other states. So why 100 ml? The research, tests and risk assessment carried out by the Explosives Working Group within the United Nations concluded that liquids carried in 100 ml containers inside a 1-litre bag (about 6 such containers fit into a 1 l bag) do not endanger flight safety. Scientists researched it and that is what they found; I cannot argue with that. So how do they establish here that something is 100 ml? Simple: they either ask you or read what is written on the container. So if I wanted to play terrorist, I would take 200 ml of liquid explosive, put it into a Dove cream container, neatly rework the label to say 100, and if asked, say 100 and walk through. And why do we insist on exactly 100? Well, because that is how it is in the other countries. Consider these lines from the e-mail:</p>
+<p>&#8220;Our country is obliged to implement the decisions taken and the standards set by the international organizations mentioned above.&#8221;</p>
+<p>I do not know to what standard this practice is carried out in other countries. Maybe the oddity exists only on Cyprus flights. Or maybe the concept called a standard is understood as something tied solely to the number 100.</p>
+<p>Take my advice: when boarding a plane, do not keep any container labelled above 100 ml in your carry-on. Empty or full makes no difference.</p>
+<img src="http://feeds.feedburner.com/~r/oguzy-gezegen/~4/nubepmpaYEk" height="1" width="1" />
+ Fri, 05 Feb 2010 12:19:21 +0000
+
+
+ Hakan Uygun: No More Sun!
+ http://www.hakanuygun.com/blog/?p=432
+ <p><a href="http://www.hakanuygun.com/blog/wp-content/uploads/2010/01/sunoracle.gif"><img class="alignleft size-full wp-image-434" title="sunoracle" src="http://www.hakanuygun.com/blog/wp-content/uploads/2010/01/sunoracle.gif" alt="sunoracle" width="202" height="122" /></a>It is getting lost among the iPad headlines, but Oracle has completed its long-running purchase of Sun. From now on, when you go to <a href="http://www.sun.com" target="_blank">www.sun.com</a> you are redirected straight to the Oracle site.</p>
+<p>As for the matters that interest me most, namely whether Sun&#8217;s free software projects will be continued, the news so far is positive. Among all these projects, the only one announced as discontinued for now is Kenai.</p>
+<p>I hope it ends happily for all of us&#8230;</p>
+<p><strong>Addendum</strong>: Following <a href="http://www.kulturmantari.org/" target="_blank">Kültür Mantarı</a>&#8216;s pointer I saw <a href="http://blogs.sun.com/jag/entry/so_long_old_friend" target="_blank">James Gosling</a>&#8217;s blog post on the subject, thought the image there deserved to be kept here as well, and copied it over&#8230;</p>
+<p><a href="http://www.hakanuygun.com/blog/wp-content/uploads/2010/01/sunrip.jpg"><img class="aligncenter size-medium wp-image-445" title="sunrip" src="http://www.hakanuygun.com/blog/wp-content/uploads/2010/01/sunrip-300x234.jpg" alt="sunrip" width="300" height="234" /></a></p>
+ Fri, 29 Jan 2010 09:28:25 +0000
+
+
+ Hakan Uygun: EMO 13th National Congress
+ http://www.hakanuygun.com/blog/?p=381
+ <p>As part of EMO&#8217;s <a href="http://www.ulusalkongre.org" target="_blank">13th National Congress</a>, to be held at ODTÜ on 23-26 December, I am giving the talks &#8220;The Effect of Free Software on the Application Development Model; What We Learned from Tekir&#8221; in the special Free Software session on Friday 25 December between 9:30-11:15, and &#8220;The Economic and Social Aspects of Free Software&#8221; between 11:30-12:30.</p>
+<p>There will also be various <a href="http://haber.linux.org.tr/2009/12/23-26-aralik-emo-ulusal-kongre-ankara-linux-seminerleri/" target="_blank">LKD seminars</a> at this event, which has a rather packed programme overall. Do come along!</p>
+ Thu, 24 Dec 2009 15:45:26 +0000
+
+
+ Hakan Uygun: Intel, Atom, Moblin
+ http://www.hakanuygun.com/blog/?p=338
+ <p>Intel wants its Atom processors to be everywhere in daily life. Being x86 based, Atom lets programmers run applications written for ordinary computers on mobile devices without much modification, and that gives Intel a significant advantage. To push this advantage further, they also rolled up their sleeves on an operating system that would raise performance on these devices and started developing Moblin. Yesterday three important announcements on these topics came from Intel&#8230;</p>
+<p>They launched a new developer programme to improve application performance on Atom-based devices, and to promote the <a href="http://appdeveloper.intel.com/en-us/">Atom Developer Program</a> they also started a contest. I think it is worth a look&#8230; ( I am going to sign up <img src="http://www.hakanuygun.com/blog/wp-includes/images/smilies/icon_smile.gif" alt=":)" class="wp-smiley" /> )</p>
+<p>The second and third announcements came together: Moblin&#8217;s new version 2.1 was released and shown running on an Atom-based <a href="http://www.engadget.com/2009/09/22/intel-announces-moblin-2-1-for-phones/#continued">smartphone</a>. At a stroke Intel has become a rival to a whole list of companies <img src="http://www.hakanuygun.com/blog/wp-includes/images/smilies/icon_smile.gif" alt=":)" class="wp-smiley" /> As I <a href="http://www.hakanuygun.com/blog/?p=279">wrote</a> recently, I expect plenty of interesting developments in the mobile world over the coming year. I hope free software and we users come out of this competition ahead&#8230;</p>
+ Thu, 24 Sep 2009 09:00:51 +0000
+
+
+ Hakan Uygun: Tech Support Cheat Sheet
+ http://www.hakanuygun.com/blog/?p=330
+ <p>I quite liked <a href="http://xkcd.com/627/">this</a> tech support cheat sheet published on xkcd the other day, and decided to make a Turkish version.</p>
+<p><img class="aligncenter size-full wp-image-331" title="teknikdestek" src="http://www.hakanuygun.com/blog/wp-content/uploads/2009/08/teknikdestek.png" alt="teknikdestek" width="468" height="461" /><br />
+For those who want it, the ODF version is <a href="http://www.hakanuygun.com/blog/wp-content/uploads/2009/08/teknikdestek.odg">here</a></p>
+ Tue, 25 Aug 2009 07:28:26 +0000
+
+
+ Hakan Uygun: Not a Pirate but an “Idea Thief”
+ http://www.hakanuygun.com/blog/?p=312
+ <p>The Ministry of Culture and Tourism has <a href="http://www.ntv.com.tr/id/24992251/" target="_blank">started</a> work on amending the Law on Intellectual and Artistic Works so that people who download music, films, books and the like over the Internet are penalized as well. To carry out this monitoring, they will work together with the internet service providers.</p>
+<p>While the folks selling books, CDs and DVDs from stalls set up on every street corner carry on without running into any trouble, and as far as I know legal arrangements against them already exist, instead of fighting them the plan is to inspect the traffic flowing over the internet. During that inspection it will make no difference whether what you downloaded was music or an e-mail from your beloved: they will be listening. They will also supposedly tell in an instant whether the music you downloaded is legal or illegal. Moreover, if I did not misread the news, officials will be paid a bonus per track found on your machine during an operation <img src="http://www.hakanuygun.com/blog/wp-includes/images/smilies/icon_smile.gif" alt=":)" class="wp-smiley" /> In other words, big brother is looking for new ways to watch and, whenever it wishes, to ban&#8230;</p>
+<p>Let none of this suggest that I do not respect intellectual rights; on the contrary, I am firmly against piracy and the theft of ideas. But I am even more firmly against personal communications being violated with these as the excuse.</p>
+<p>Finally, one more piece of news: Pirate Bay&#8217;s 23 GB archive has also been opened for <a href="http://thepiratebay.org/torrent/5053827" target="_blank">sharing</a>. There may be illegal material in this archive, but there are also many works shared legally. Download the legal ones <img src="http://www.hakanuygun.com/blog/wp-includes/images/smilies/icon_smile.gif" alt=":)" class="wp-smiley" /> Use free software, not pirated software!</p>
+ Tue, 18 Aug 2009 08:07:07 +0000
+
+
+ Hakan Uygun: What Is Going On in the World of Mobile Devices?
+ http://www.hakanuygun.com/blog/?p=279
+ <p><img class="aligncenter size-full wp-image-282" title="moblin" src="http://www.hakanuygun.com/blog/wp-content/uploads/2009/07/moblin.jpg" alt="moblin" width="280" height="151" />The world of mobile devices has been stirring for a while now. With the Apple iPhone, serious movement started in the mobile phone world. Apple emerged as a serious rival to the sector&#8217;s leading firms such as Palm, Nokia, Sony-Ericsson and BlackBerry; then Google, with Android, drew attention as a platform through what it promised, even while there was hardly any hardware supporting it yet. Nokia, which kept defending <a href="http://en.wikipedia.org/wiki/Symbian_OS" target="_blank">Symbian</a> against <a href="http://en.wikipedia.org/wiki/Android_os" target="_blank">Android</a>, <a href="http://en.wikipedia.org/wiki/WebOS" target="_blank">WebOS</a> and <a href="http://en.wikipedia.org/wiki/IPhone_OS" target="_blank">iPhone OS</a>, bought up the shares it did not yet own, founded a foundation, and handed Symbian over to it as open source.</p>
+<p>Right at this point, Intel&#8217;s Atom processor made it possible to develop PCs that use few resources, and NetBooks entered the ranks of popular devices over the past year.</p>
+<p>This year Intel began investing seriously in the Mobile Internet Device ( MID ). It even started a dedicated Linux distribution to make these devices more attractive: <a href="http://en.wikipedia.org/wiki/Moblin" target="_blank">Moblin</a>.</p>
+<p>On Moblin support, Intel first made a deal with Canonical. Later, Canonical announced that as its NetBook platform it had decided to support <a href="http://en.wikipedia.org/wiki/Maemo_%28operating_system%29" target="_blank">Maemo</a>, which Nokia produces to use on its own tablets. Intel announced that it had handed Moblin over to the Linux Foundation and agreed with Novell on support. Two weeks ago a Nokia - Intel agreement was announced without any details given. The general reading was that Nokia would use Intel technologies to build more capable phones, and that the two would meanwhile choose between Moblin and Maemo and join forces. Today Nokia announced that it will not produce Android-based phones and that it will port Maemo, until now GTK+ based, to Qt.</p>
+<p>This is exactly where my questions begin. Will Canonical continue to support a Qt-based Maemo? Will Nokia produce Intel-based MIDs running Maemo, or produce Intel-based phones and turn Symbian into a brand-new system as capable as its rivals? What is Intel planning for MIDs? Do those plans include continuing to support Moblin, or will it invest in Maemo together with Nokia? Will Android, which has also started appearing on NetBooks, be an alternative for the hardware to be produced?</p>
+<p>Most important of all: will what comes out of all this be cheap and capable hardware for us consumers, or yet another pile of mutually incompatible toys?</p>
+ Tue, 07 Jul 2009 11:04:23 +0000
+
+
+ Hakan Uygun: To Ankara for the LKD General Assembly
+ http://www.hakanuygun.com/blog/?p=259
+ <p>We are going to Ankara this weekend for the LKD General Assembly. 
The text below is from Volkan, who has been trying to organize this trip&#8230;</p>
+<p>***</p>
+<p>I would probably have to run around this much if I were going to Baghdad instead of Ankara,</p>
+<p>TCDD : the mighty<br />
+institution that operates the most high-tech YHT and delivers you to Ankara in 5 hours 28 minutes.<br />
+Yes, unfortunately this institution does not want to sell tickets.</p>
+<p>1- The web site is Windows and Internet Explorer dependent. First of all you need to own such a system. (MAC and Linux users are not in the rail passenger portfolio. They are plane or bus lovers.!)</p>
+<p>2- The ticket sales application on the web site is merely a variant of the bank queue machines. It hands out whatever empty seat is next in<br />
+order. In the pullman cars the first 6 seats face each other, and the last 3 seats cannot recline. Guess<br />
+which seats it assigns first? Yes, you guessed it: those. You cannot pick a different<br />
+seat or car. The selectable options are next-to-a-lady and<br />
+internet. How far even those allow real choices is doubtful.<br />
+(I asked for internet; it said none available.)</p>
+<p>3- PTT branches are announced as agencies that will sell train tickets. You go there,<br />
+aaand&#8230; Yes, we sell them, but only if we can get into the site, they say. What happened? Of course<br />
+they could not get in. Thank you for waiting in line for 10 minutes.</p>
+<p>4- Agencies: those who sell TCDD tickets for a commission. We go to one; I ask, we need tickets,<br />
+can you sell them? Of course, go ahead, they say. I want a round trip, 1 full fare and 1 student. The clerk starts with<br />
+- I cannot issue round-trip tickets from here.!<br />
+- What do you mean?<br />
+- There is no difference anyway, let me issue them separately. Is the price even different?<br />
+A colleague corrects him: you can issue it if it is the same express.<br />
+- Of course, why else would I buy round trip: there is a discount, I say.<br />
+Anyway, he goes in and tries; I ask which seat numbers came up.<br />
+- 4 and 5, he says. (Whichever of the first six seats happen to be free)<br />
+- Can you not change them?<br />
+- Unfortunately not.<br />
+- Are you going into the internet version, I ask, unable to stop myself.<br />
+- No, we go in as an agency, but there is no difference, comes the answer. (Presumably<br />
+only an extra commission is added on top.)<br />
+- Then who lets me choose a seat?<br />
+- You can get that at the station, Haydarpaşa or Sirkeci.</p>
+<p>5- Our route: Sirkeci station. One bus and a tram later I am there.<br />
+Once burned, my first question this time: I want a ticket for the Fatih express, but can you<br />
+choose the seat?<br />
+- Let us see; if the seats are free, we can choose, says the clerk this time.<br />
+- Ohh, at last.<br />
+- I want 1 full and 1 student round trip, plus 1 student one-way.<br />
+- There is no round-trip discount for students, comes the reply.<br />
+- I know; it is for the full fare that I am asking. (Info: a full-fare round trip<br />
+costs the same as a student ticket, strange. If you are buying round trip, being a student is<br />
+useless. So no need for the student card. Observation: students always travel one<br />
+way.)<br />
+- Credit card or cash?<br />
+- DIINN ! credit card.. you do take it?<br />
+- Yes, 112 TL<br />
+- There you go; zip zip, two tack tacks, a bit of chatter, and the tickets and the POS slip are in my hand.</p>
+<p>Before stepping away from the counter I check the tickets: train, date, seat and so on,<br />
+are they correct. Tickets in hand, thanking them and leaving, it has taken me a good 1.5 hours<br />
+to complete a single purchase. And the return leg is still ahead.</p>
+<p>In short,<br />
+Outbound : 18/06/2009 Thursday 23:30 Haydarpaşa Car:X Seat: XX-XX-XX<br />
+Return : 20/06/2009 Saturday 23:30 Ankara Car:X Seat: XX-XX</p>
+<p>Have a good trip.</p>
+<p>=====================<br />
+Footnote-1: Besides car 1 where I got these seats, 2 more cars still look completely empty.<br />
+In cars 2-3 there are sold seats.</p>
+<p>Footnote-2: As someone who never got used to doing business over the phone, I never chased up whether there is a reservation or<br />
+sales line. A different adventure may await you there too, who knows?</p>
+<p>Footnote-3: Do you think you would stand a chance of choosing an upper or lower berth in the sleeper cars?</p>
+ Wed, 17 Jun 2009 21:33:17 +0000
+
+
+ Hakan Uygun: IE, WTW and Food Aid
+ http://www.hakanuygun.com/blog/?p=248
+ <p><a href="http://walktheweb.wfp.org/" target="_blank"><img class="aligncenter size-full wp-image-252" title="wfp-wtw" src="http://www.hakanuygun.com/blog/wp-content/uploads/2009/06/wfp-wtw.png" alt="wfp-wtw" width="512" height="240" /></a>These days, hunger around the world, food aid and the related news interest me more than ever. So Microsoft&#8217;s new campaign caught my eye. To give the new version of its Internet browser better publicity, Microsoft has launched a campaign built on food aid. For every complete download of IE8 it will donate 8 meals. You can reach the details <a href="http://www.browserforthebetter.com/download.html" target="_blank">here</a>&#8230;</p>
+<p>Naturally this has stirred up plenty of debate; on <a href="http://www.techcrunch.com/" target="_blank">TechCrunch</a>, for example, there is a pile of posts and discussions about the campaign. For my own part, I could not decide whether to download this browser, which does not run on Linux anyway, burn some network time and trigger a donation; to encourage current IE users to move from the buggy old versions to this new release, in which a pile of CSS and JS errors have been fixed; or to keep quiet altogether. 
Ardından da bu haberi bahane edip daha fazlası ile yazayım dedim.</p> +<p>İster IE8 indirin isterseniz aşağıdaki organizasyonların sitelerini ziyaret edip dünya üzerindeki açlık ve fakirlikle mücadeleye katkıda bulunmak için yapabileceklerinizi öğrenin&#8230; Bunların içerisinde özellikle Birleşmiş Milletler Dünya Gıda Programı&#8217;nın <a href="http://walktheweb.wfp.org/" target="_blank">Walk The Web</a> kampanyasına bir göz atmanızı öneririm&#8230;</p> +<ul> +<li><a href="http://www.wfp.org/" target="_blank">www.wfp.org</a></li> +<li><a href="http://www.actionagainsthunger.org/" target="_blank">www.actionagainsthunger.org</a></li> +<li><a href="http://www.hakanuygun.com/blog/www.makepovertyhistory.org" target="_blank">www.makepovertyhistory.org</a></li> +<li><a href="http://www.standagainstpoverty.org" target="_blank">www.standagainstpoverty.org</a></li> +<li><a href="http://www.engineersagainstpoverty.org" target="_blank">www.engineersagainstpoverty.org</a></li> +<li><a href="http://www.whiteband.org" target="_blank">www.whiteband.org</a></li> +</ul> +<p>Son olarak da bugünlerde herkese önerdiğim gibi <a href="http://www.facebook.com/ext/share.php?sid=107634228486&h=FwnnE&u=6crnv&ref=mf" target="_blank">Yuva</a> ( Home ) belgeselini izlemenizi şiddetle tavsiye ederim.</p> + Tue, 16 Jun 2009 11:38:02 +0000 + + + Hakan Uygun: TBD Bilişim Kongresi’nde Özgür Yazılım Paneli + http://www.hakanuygun.com/blog/?p=244 + <p>TBD&#8217;nin bu yıl 3.sünü düzenlediği <a href="http://www.istanbulbilisimkongresi.org.tr/" target="_blank">İstanbul Bilişim Kongresi</a>&#8216;nde Pazar günü saat 14:00&#8242;de Özgür Yazılım Paneli olacaktır. Panel&#8217;de özgür yazılım ve iş modelleri üzerinde durulacaktır. İlgilenenlere duyurulur&#8230;</p> +<p><strong> Yer:</strong> Marmara Üniversitesi Nişantaşı Kampüsü<br /> +Erdal İnönü Bilim ve Kültür Merkezi<br /> +<strong>Tarih:</strong> 31 Mayıs Pazar, 14:00 - 15:20<br /> +<strong>Oturum başkanı:</strong> Görkem Çetin<br /> +<strong>Konuşmacılar:</strong> Enver Altın, Hakan Uygun, Cahit Cengizhan</p> + Thu, 28 May 2009 16:22:08 +0000 + + + Hakan Uygun: Sıralama Algoritmaları + http://www.hakanuygun.com/blog/?p=231 + <p>Sıralama algoritmaları, programcılığa girişi oluşturan en temel şeylerdendir. Özellikle aynı problemin çözümü için farklı yöntemlerin nasıl farklı sonuçlar verdiğini görmek için şahane örneklerdir. Daha da iyisi bu farklı algoritmaların görsel olarak karşılaştırmasıdır. İşte tam da bu işi başarıyla yapan bu <a href="http://www.sorting-algorithms.com/" target="_blank">siteye</a> bakmanızı şiddetle tavsiye ederim. Sadece farklı algoritmaları görsel karşılaştırmasını değil, her algoritmanın farklı veri kümelerinde davranış biçimini ve detaylı karşılaştırmalarını da bulabilirsiniz&#8230;</p> + Mon, 13 Apr 2009 08:20:53 +0000 + + + diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/www/rss20.xml b/DJAGEN/tags/djagen_old/djagen/gezegen/www/rss20.xml new file mode 100755 index 0000000..0bf39a9 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/gezegen/www/rss20.xml @@ -0,0 +1,228 @@ + + + + + Linux Gezegeni + http://gezegen.linux.org.tr + en + Linux Gezegeni - http://gezegen.linux.org.tr + + + Oğuz Yarımtepe: Yakın Doğu’da Seminer Rüzgarları + http://www.loopbacking.info/blog/?p=98 + http://feedproxy.google.com/~r/oguzy-gezegen/~3/dmDtp8fRToI/ + + ]]> +<p>Geçen haftadan beri Yakın Doğu Üniversitesi&#8217;nde Linux ve Özgür Yazılım seminerleri düzenliyoruz. İlk seminerimiz Linux Nedir? idi. 
Uzun zamandan beri dinlediğin en eğlenceli Linux Nedir&#8217;lerden birisi idi. 3.30 saat sürmesine rağmen konuşmacı Ali Erdinç Köroğlu&#8217;nun eğlenceli anlatımı ile zaman su gibi aktı denebilir.</p> +<p>Yakın Doğu&#8217;ya ilk geldiğim zamanlarda ben de bir Linux Nedir semineri vermiştim. O zamanki katılımcı durumu işte aşağıdaki gibi idi.</p> +<p><a href="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0697.jpg"><img class="alignnone size-medium wp-image-99" title="Linux Nedir Semineri" src="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0697-300x199.jpg" alt="" width="300" height="199" /></a></p> +<p>Aslında çoğunluğunu öğretmenlerin oluşturması gereken katılımcı kitlesini, öğrenciler oluşturuyor idi. 1.30 saatlik bir anlatım ile katılımcılara Linux Nedir anlatmıştım. Seminer süresince hiç soru gelmemişti. Seminerden sonra yanıma gelip soru soranlar da olunca, epey mutlu olmuştum.</p> +<p><a href="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0704.jpg"><img class="alignnone size-medium wp-image-100" title="Linux Nedir Seminer" src="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0704-300x199.jpg" alt="" width="300" height="199" /></a></p> +<p>Şimdiki durumda katılımcı sayısı azımsanmayacak kadar olması yanında, daha ilgili bir kalabalığın katıldığını düşünüyorum.</p> +<p><a href="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0757.jpg"><img class="alignnone size-medium wp-image-101" title="YDU AEK Internet'in Yapı Taşları Semineri" src="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0757-300x199.jpg" alt="" width="300" height="199" /></a></p> +<p>Ali Erdinc&#8217;in de epey bir eğlenceli anlatımı olduğunu, dinleyicinin dikkatini çekmek için onları arada dürttüğünü de belirtmek lazım.</p> +<p><a href="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0759.jpg"><img class="alignnone size-medium wp-image-102" title="Internet'in Yapı Taşları" src="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0759-300x199.jpg" alt="" width="300" height="199" /></a></p> +<p>Bu seminerler dizisi Mayıs ayına kadar devam edecek. Meraklısı için üniversite <a href="http://duyuru.neu.edu.tr">duyuru sayfası</a>, <a href="http://www.facebook.com/NearEastUniversity">Facebook</a> ve <a href="http://twitter.com/NearEastUniv">Twitter</a>&#8216;dan takip edebileceklerini söyleyelim. Hatta Kıbrıs&#8217;ta olanlar için üniversitede her Cuma akşamları ve Cumartesi günleri sinema gösterimleri yaptığımızı da belirtelim. Biz izlerken eğleniyoruz. Bekleriz.</p> +<p>Lefkoşa&#8217;ya bahar geldi denebilir. Oğuz Yarımtepe Kıbrıs&#8217;tan bildirdi.</p> +<p><a href="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0770.jpg"><img class="alignnone size-medium wp-image-103" title="Inovasyon Merkezi, tarla" src="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0770-300x199.jpg" alt="" width="300" height="199" /></a></p> +<img src="http://feeds.feedburner.com/~r/oguzy-gezegen/~4/dmDtp8fRToI" height="1" width="1" /> + Tue, 16 Mar 2010 17:40:18 +0000 + + + Oğuz Yarımtepe: 100 ml + http://www.loopbacking.info/blog/?p=95 + http://feedproxy.google.com/~r/oguzy-gezegen/~3/nubepmpaYEk/ + + ]]> +<p>1 Ocak 2010 tarihinden itibaren uçuşlarda sıvı kısıtlaması uygulanıyor. El bagajlarında 100 mlyi geçen sıvılara, jel, krem vb. el konuyor. Yılbaşında Kıbrıs&#8217;a dönerken başıma bir olay gelince ne menem bir şeydir diye araştırdığım bir konu oldu. 
Sırt çantamda 150 ml Dove krem var idi. Kremi epeydir kullanıyordum. Saat 5.30 gibi uçağa binmek için son kontrol noktasında bekliyorduk. Kontrol için güvenlik gelebilirsiniz dedikten sonra ben de  çantayı cihaza bırakıp geçtim. Cihazın başındaki görevli bu çantada sıvı bir şey var diye seslendi bana. Ön gözde dedi. Evet krem var dedim. Açtı. Baktı 150 ml bir krem. 100 ml üzerini alamıyoruz dedi. Ben de uyku sersemi, o kullanılmış bir kutu, içindeki belki 100 mlnin altındadır dedim. Görevli gülümsedi. O da görevini yapıyordu. Ayakta duran bayan sert bir şekilde kutuyu aldı, baktı, bizim için bunu hacmi önemli dedi. Açıkcası tartışmanın bir anlamı yoktu. Onlar da bir kuralı uyguluyorlardı. Elimle söylendiği gibi para verip aldığım kremi çöpe attım.</p> +<p>Şimdi olayın benim açımdan garip noktalarına gelelim</p> +<p>* Güvenlik görevlisi kutunun kaç ml olduğuna üzerine bakarak karar verdi. Yani bir dahaki sefere sırf sistemi denemek için, aynı kremden alıp, üzerindeki 150 yi 100 yaparsam geçer muhtemelen.</p> +<p>* Görevli içine açıp bakmadı bile. Bana sordu ne var diye. Yani içine şu epeydir ortalarda olmayan domuz gribi koysam dahi bilgisi olamazdı.</p> +<p>* Elimle çöpe attım, o çok koydu.</p> +<p>Ben de bunun üzerine Ulaştırma Bakanlığı&#8217;na bir eposta attım. Epostam Sivil Havacılık Dairesine yönlendirilmiş ve bir yanıt geldi. Kısaca, bu 100 ml uygulaması 10 Ağustos 2006&#8242;da İngiltere&#8217;de ortaya çıkan terörist plan sonrasında sivil havacılık gündemine girmiş. 6 Kasım 2006&#8242;da çıkarılan 1546/2006 tüzüğü ile tüm AB üyesi ülkelerde ve İsviçre, İzlanda ve Norveç&#8217;te, ABD ve Kanada&#8217;da uygulanmaya başlamış. Türkiye de ECAC üyesi bir devlet olduğundan tavsiye kararına uyarak bu uygulamayı diğer devletlerdeki gibi aynı kurallar dahilinde uygulamaya başlamış. Neden 100 ml peki? Birleşmiş Milletler bünyesindeki Patlayıcılar Çalışma Grubunda yapılan araştırma, testler ve risk değerlendirmesi neticesinde sıvıların 100 ml lik kaplarda 1 litreklik poşette taşınması halinde (1 lt&#8217;lik poşete yaklaşık 6 adet kap sığmaktaymış) uçuşun emniyetini tehlikeye düşürmeyeceği sonucuna varılmış. Bilim adamları araştırmış bulmuş, bir şey diyemeyecem bu konuda. Peki bizde 100 ml olduğu nasıl anlaşılıyor? Baya, ya size soruyorlar ya da üzerindeki yazıları okuyorlar. Yani ben teroristlik yapmak istesem, alırım 200 ml sıvı patlayıcı, koyarım Dove krem kutusuna, yazıları bir güzel 100 diye düzenlerim, sorarlarsa da 100 der geçerim. Peki biz neden illa 100 diye uyguluyoruz? E çünkü diğer ülkelerde de öyle de ondan. Epostadaki şu satırlara bakalım:</p> +<p>&#8220;Ülkemiz yukarıda adı geçen uluslarası kuruluşların aldığı kararları  ve berlilediği standartları uygulamakla yükümlüdür.&#8221;</p> +<p>Bu konudaki uygulama diğer ülkelerde hangi standartlarda uygulanıyor bilmiyorum. Belki de sadece Kıbrıs uçuşlarında bir acayiplik vardır. Standart denilen kavram sadece 100 sayısına bağlı bir şeydir diye de anlaşılıyor olabilir.</p> +<p>Siz siz olun, uçağa binerken el bagajınızda üzerinde 100 ml üzeri bir şey yazan herhangi bir kap bulundurmayın. İçi boş dolu farketmez.</p> +<img src="http://feeds.feedburner.com/~r/oguzy-gezegen/~4/nubepmpaYEk" height="1" width="1" /> + Fri, 05 Feb 2010 12:19:21 +0000 + + + Hakan Uygun: Artık Sun yok! 
+ http://www.hakanuygun.com/blog/?p=432 + http://www.hakanuygun.com/blog/?p=432 + +<p>iP<a href="http://www.hakanuygun.com/blog/wp-content/uploads/2010/01/sunoracle.gif"><img class="alignleft size-full wp-image-434" title="sunoracle" src="http://www.hakanuygun.com/blog/wp-content/uploads/2010/01/sunoracle.gif" alt="sunoracle" width="202" height="122" /></a>ad haberleri arasında kaybolup gidiyor ama Oracle uzun süren Sun&#8217;ı satın alma işlemini bitirdi. Artık <a href="http://www.sun.com" target="_blank">www.sun.com</a> adresine girdiğinizde sizi doğrudan Oracle sitesine yönlendiriyor.</p> +<p>Beni en çok ilgilendiren konular ise Sun&#8217;ın özgür yazılım projelerine devam edilip edilmeyeceği konularında ise şimdilik olumlu haberler geliyor. Bütün bu projeler içerisinde devam edilmeyeceği açıklanan tek proje şimdilik Kenai.</p> +<p>Umarım hepimiz için mutlu son olur&#8230;</p> +<p><strong>Ek</strong>: <a href="http://www.kulturmantari.org/" target="_blank">Kültür Mantarı</a>&#8216;nın yönlendirmesi ile <a href="http://blogs.sun.com/jag/entry/so_long_old_friend" target="_blank">James Gosling&#8217;</a>in bu konu ile ilgili blogunu gördüm ve ordaki görselin de burada saklanmasının iyi olacağını düşünüp buraya kopyaladım&#8230;</p> +<p><a href="http://www.hakanuygun.com/blog/wp-content/uploads/2010/01/sunrip.jpg"><img class="aligncenter size-medium wp-image-445" title="sunrip" src="http://www.hakanuygun.com/blog/wp-content/uploads/2010/01/sunrip-300x234.jpg" alt="sunrip" width="300" height="234" /></a></p> + Fri, 29 Jan 2010 09:28:25 +0000 + + + Hakan Uygun: EMO 13. Ulusal Kongresi + http://www.hakanuygun.com/blog/?p=381 + http://www.hakanuygun.com/blog/?p=381 + +<p>EMO&#8217;nun 23-26 Aralıkta ODTÜ de gerçekleşecek olan <a href="http://www.ulusalkongre.org" target="_blank">13. Ulusal Kongre</a>si kapsamında 25 Aralık Cuma günü 9:30-11:15 arasında Özgür Yazılım başlılklı özel oturumda &#8220;Özgür Yazılımların Uygulama Geliştirme Modeline Etkisi; Tekir’den Öğrendiklerimiz&#8221; ve 11.30-12.30 arasında da &#8220;Özgür Yazılımın Ekonomik ve Sosyal Yönleri” sunumlarını yapıyorum.</p> +<p>Genel olarak yüklü bir programı olan bu etkinlikte çeşitli <a href="http://haber.linux.org.tr/2009/12/23-26-aralik-emo-ulusal-kongre-ankara-linux-seminerleri/" target="_blank">LKD seminerleri</a> de olacak. Buyrunuz geliniz!</p> + Thu, 24 Dec 2009 15:45:26 +0000 + + + Hakan Uygun: Intel, Atom, Moblin + http://www.hakanuygun.com/blog/?p=338 + http://www.hakanuygun.com/blog/?p=338 + +<p>Intel Atom işlemcileri ile hayatın her yerinde yer alamak istiyor. x86 tabanlı Atom işlemciler programcılara normal bilgisayarlar için yazılmış uygulamalarını çok fazla değişikliğe uğratmaya gerek kalmadan mobil cihazlarda çalıştırabilmesine olanak sağlıyor. Bu da Intel&#8217;e önemli avantajlar sağlıyor. Bu avantajını daha da arttırmak için cihazar üzerinde performansı arttıracak işletim sistemi için de kolları sıvayıp Moblin&#8217;i geliştirmeye başlamışlardı. Dün bu konular üzerine Intel&#8217;den üç önemli açıklama oldu&#8230;</p> +<p>Atom işlemcili cihazlarda uygulama performansını arttırmak için yeni bir geliştirici programı başlattılar. <a href="http://appdeveloper.intel.com/en-us/">Atom Developer Program</a>&#8216;ı teşvik etmek içinde bir yarışma başlattılar. 
Bence bir göz atmakta fayda var&#8230; ( Ben kayıt olacağım <img src="http://www.hakanuygun.com/blog/wp-includes/images/smilies/icon_smile.gif" alt=":)" class="wp-smiley" /> )</p> +<p>İkinci ve üçüncü açıklamalar ise bir arada geldi, Moblin&#8217;in yeni sürümü 2.1 yayınlandı ve Atom işlemcili bir <a href="http://www.engadget.com/2009/09/22/intel-announces-moblin-2-1-for-phones/#continued">akıllı telefon</a> üzerinde sunuldu. Intel bir çırpıda bir dolu firmaya rakip oldu <img src="http://www.hakanuygun.com/blog/wp-includes/images/smilies/icon_smile.gif" alt=":)" class="wp-smiley" /> Geçenlerde de <a href="http://www.hakanuygun.com/blog/?p=279">yazmıştım</a>,  önümüzdeki yıl içerisinde mobil dünyada bir dolu ilginç gelişmeler bekliyorum. Umarım bu rekabetten özgür yazılım ve biz kullanıcılar kazançlı çıkarız&#8230;</p> + Thu, 24 Sep 2009 09:00:51 +0000 + + + Hakan Uygun: Teknik Destek Kopya Kağıtı + http://www.hakanuygun.com/blog/?p=330 + http://www.hakanuygun.com/blog/?p=330 + +<p>xkcd&#8217;de geçen gün yayınlanan <a href="http://xkcd.com/627/">bu</a> teknik destek kopya kağıdını pek beğendim ve Türkçe bir sürümünü yapayım dedim.</p> +<p><img class="aligncenter size-full wp-image-331" title="teknikdestek" src="http://www.hakanuygun.com/blog/wp-content/uploads/2009/08/teknikdestek.png" alt="teknikdestek" width="468" height="461" /><br /> +İsteyenler için ODF hali de <a href="http://www.hakanuygun.com/blog/wp-content/uploads/2009/08/teknikdestek.odg">burada</a></p> + Tue, 25 Aug 2009 07:28:26 +0000 + + + Hakan Uygun: Korsan Değil “Fikir Hırsızı” + http://www.hakanuygun.com/blog/?p=312 + http://www.hakanuygun.com/blog/?p=312 + +<p>Kültür ve Turizm Bakanlığı, Fikir ve Sanat Eserleri Kanunu&#8217;nda değişiklik yapılarak İnternet üzerinden müzik, film, kitap ve benzeri şeyleri indirenlerinde ceza almasını sağlamak için çalışma <a href="http://www.ntv.com.tr/id/24992251/" target="_blank">başlatmış</a>. Bu denetimi yapabilmek için de internet servis sağlayıcıları ile birlikte çalışacaklarmış.</p> +<p>Her köşe başında kurulan tezgahlarda kitap, cd, dvd satan arkadaşlar hiç bir sorun ile karşılaşmadan bu işi yaparken, bunun için bildiğim kadarıyla yasal düzenlemeler zaten var, onlarla mücadele etmek yerine, internetten akan tarfiği denetleyecekler. Bu denetim sırasında da müzik mi yoksa sevilinizden gelen e-postanızı mı indirdiğiniz fark etmeyecek, dinleyecekler. Ayrıca indirdiğiniz müziğin yasal mı yoksa yasa dışımı olduğunu bir çırpıda anlayacaklar. Bu arada, haberden eğer yanlış okumadıysam,  yapılan operasyonda makinenizde çıkan parça sayısı kadar da görevlilere prim verilecek <img src="http://www.hakanuygun.com/blog/wp-includes/images/smilies/icon_smile.gif" alt=":)" class="wp-smiley" /> Yani büyük birader izlemeye ve istediğinde yasaklamaya olanak sağlayacak yeni yollar arıyor&#8230;</p> +<p>Bütün bunlardan fikir haklarına saygı duymadığım anlaşılmasın tam tersine korsana, fikir hırsızlığına kesinlikle karşıyım. Fakat bütün bunların bahane edilerek kişisel iletişimin ihlal edilmesine daha çok karşıyım.</p> +<p>Son olarak bir haber daha verelim Pirate Bay&#8217;in 23 GB&#8217;lik arşivi de <a href="http://thepiratebay.org/torrent/5053827" target="_blank">paylaşıma</a> açılmış. Bu arşiv içerisinde yasal olmayan şeyler olabilir ama yasal olarak paylaşılan da bir çok eser var. 
Sizler yasal olanlarını indirin <img src="http://www.hakanuygun.com/blog/wp-includes/images/smilies/icon_smile.gif" alt=":)" class="wp-smiley" /> Korsan değil özgür yazılım kullanın!</p> + Tue, 18 Aug 2009 08:07:07 +0000 + + + Hakan Uygun: Mobil Cihazlar Dünyasında Neler Oluyor? + http://www.hakanuygun.com/blog/?p=279 + http://www.hakanuygun.com/blog/?p=279 + +<p><img class="aligncenter size-full wp-image-282" title="moblin" src="http://www.hakanuygun.com/blog/wp-content/uploads/2009/07/moblin.jpg" alt="moblin" width="280" height="151" />Bir süredir mobil cihazlar dünyası hareketlenmiş durumda. Apple iPhone ile birlikte mobil telefon dünyasında ciddi bir hareketlenme olmuştu. Palm, Nokia, Sony-Ericson, BlackBerry gibi sektörün önde gelen firmalarına Apple ciddi bir rakip olarak ortaya çıkmış ardından da Google Android ile bir platform olarak henüz yeterince destekleyen donanım olmasa bile vaadettikleri ile dikkatleri üzerine çekmişti. <a href="http://en.wikipedia.org/wiki/Android_os" target="_blank">Android</a>, <a href="http://en.wikipedia.org/wiki/WebOS" target="_blank">WebOS</a> ve <a href="http://en.wikipedia.org/wiki/IPhone_OS" target="_blank">iPhone OS</a>&#8216;a  karşı <a href="http://en.wikipedia.org/wiki/Symbian_OS" target="_blank">Symbian</a>&#8216;ı savunmaya devam eden Nokia, elinde olmayan hisselerini de alarak,  bir vakıf kurup Symbiyan&#8217;ı açık kaynak kodlu olarak  bu vakfa devretmişti.</p> +<p>Tam da bu esnada Intel Atom işlemcisi ile düşük kaynak kullanan PC&#8217;lerin geliştirilmesine olanak sağladı ve NetBook&#8217;lar geçtiğimiz yıl içinde popüler cihazlar arasına girdiler.</p> +<p>Bu yıl ise Intel, Mobile Internet Device ( MID - Mobil Internet Aracı ) üzerine ciddi yatırımlar yapmaya başladı. Hatta bu cihazların cazibesini arttırmak için özel bir linux dağıtımına bile başladı : <a href="http://en.wikipedia.org/wiki/Moblin" target="_blank">Moblin</a>.</p> +<p>Moblin&#8217;e destek konusunda Intel önce Canonical ile anlaşmıştı. Daha sonra Canonical NetBook dağıtımı olarak Nokia&#8217;nın kendi tabletlerinde kullanmak amacıyla ürettiği <a href="http://en.wikipedia.org/wiki/Maemo_%28operating_system%29" target="_blank">Maemo</a>&#8216;yu desteklemeye karar verdiğini açıkladı. Intel&#8217;de Moblin&#8217;i Linux Vakfı&#8217;na devrettiğini ve destek konusunda da Novell&#8217;le anlaştığını ilan etti. İki hafta önce detayları belirtilmeyen bir Nokia - Intel anlaşması ilan edildi. Genel olarak yorumlanan ise  Nokia&#8217;nın daha becerikli telefonlar üretmek için Intel teknolojilerini kullanacağı bu arada da Moblin ile Maemo arasında bir seçim yapıp güçlerini birleştirecekleri yönündeydi. Bugün Nokia, Android temelli telefonlar üretmeyeceğini ve GTK+ temelli olan Maemo&#8217;yu Qt&#8217;ye taşıyacağını ilan etti.</p> +<p>İşte benim sorularımın temelini de burası oluşturuyor. Qt temelli bir Maemo&#8217;yu Canonical desteklemeye devam edecek mi? Nokia Intel işlemcili MID&#8217;ler üretip bunlarda Mameo&#8217;mu koşturacak yoksa Intel işlemcili telefonlar üretip Symbian&#8217;ı rakipleri kadar becerikli yepyeni bir hale mi dönüştürecek? Intel MID&#8217;ler konusunda neler planlıyor? Bu planları içerisinde Moblin&#8217;i desteklemeye devam etmek var mı yoksa Nokia ile birlikte Maemo&#8217;ya yatırım mı yapacaklar? 
NetBook&#8217;larda da kullanılmaya başlayan Android de bu üretilecek donanımlar için bir alternatif olacak mı?</p> +<p>Hepsinden önemlisi bütün bunların sonucunda ortaya çıkacak olan, biz tüketiciler için ucuz ve becerikli donanımlar mı yoksa birbiri ile uyumsuz bir dolu daha oyuncak mı?</p> + Tue, 07 Jul 2009 11:04:23 +0000 + + + Hakan Uygun: LKD Genel Kurulu için Ankara&#8217;ya + http://www.hakanuygun.com/blog/?p=259 + http://www.hakanuygun.com/blog/?p=259 + +<p>Bu hafta sonu gerçekleştirilecek LKD Genel Kurulu için Ankara&#8217;ya gidiyoruz. Aşağıdaki yazı bu yolculuk için organizasyon yapmaya çalışan Volkan&#8217;dan&#8230;</p> +<p>***</p> +<p>Ankara yerine Bağdat&#8217;a gitsem bu kadar koştururdum herhalde,</p> +<p>TCDD: en teknolojik YHT çalıştıran, 5 saat 28 dk Ankara&#8217;ya ulaştıran koskoca<br /> +kurum.<br /> +Evet bu kurum maalesef bilet satmak istemiyor.</p> +<p>1- web sitesi Windows ve Internet Explorer bağımlısı. Öncelikle böyle bir<br /> +sisteme sahip olmanız gerekli. (MAC ve Linux kullanıcıları tren yolcuları<br /> +portföyünde yer almıyor. Onlar uçak veya otobüs severler!)</p> +<p>2- web sitesindeki bilet satış uygulaması banka sıra makinelerinin bir<br /> +türevi sadece. Sıradan boş koltuk veriyor. Pulman vagonlarında ilk 6 koltuk<br /> +karşılıklı bakar durumda, son 3 koltuk da geriye yatamaz durumda. Bilin<br /> +bakalım verdiği ilk koltuklar nereleri? Evet bildiniz bunlar. Farklı bir<br /> +koltuk veya vagon seçemiyorsunuz. Seçilebilecek şeyler bayan yanı ve<br /> +internet. Onlarda da ne kadar gerçek seçimlere izin verildiği şüpheli.<br /> +(İnternet olsun dedim, sonuç yok dedi.)</p> +<p>3- PTT şubeleri tren bilet satacak acenteler olarak duyuruluyor. Gidiyorsunuz,<br /> +veee&#8230; Evet, biz satıyoruz, ama siteye girebilirsek diyorlar. Ne oldu, tabii<br /> +ki giremediler. 10 dk sıra beklediğiniz için teşekkür ederiz.</p> +<p>4- Acente, komisyon karşılığı TCDD bileti satanlar. Gidiyoruz birine, bize<br /> +bilet lazım satabiliyor musunuz? diye soruyorum. Tabii buyrun diyorlar. Gidiş<br /> +dönüş 1 tam 1 öğrenci istiyorum. Satıcı önce<br /> +- G/D kesmiyorum diyor buradan!<br /> +- Nasıl yani?<br /> +- Fark yok zaten, ayrı ayrı keseyim. Fiyatı farklı mı ki?<br /> +Başka bir arkadaşı düzeltiyor, aynı express olursa kesebilirsin.<br /> +- Elbette G/D niye alayım indirim var diyorum.<br /> +Neyse girip deniyor, gelen koltuk numaralarını soruyorum.<br /> +- 4,5 diyor. (İlk altı koltuk içinden boş olanlar)<br /> +- Değiştiremiyor musunuz?<br /> +- Maalesef.<br /> +- Internet sürümüne mi giriyorsunuz diyorum ister istemez.<br /> +- Hayır biz acente olarak giriyoruz ama fark yok, cevabı geliyor. (Tahminen<br /> +üzerine ek komisyon ekleniyor sadece.)<br /> +- Kim koltuk seçtiriyor bana?<br /> +- Gardan alabilirsiniz, Haydarpaşa veya Sirkeci.</p> +<p>5- Rotamız Sirkeci garı.
Bir otobüs ve tramvay ile ulaşıyorum.<br /> +Bende dil yandı ya, ilk soru Fatih expresine bilet istiyorum, ama koltuk<br /> +seçebiliyor musunuz?<br /> +- Bakalım yer boş olursa seçebiliriz diyor satıcı bu kez.<br /> +- Ohh nihayet.<br /> +- 1 tam 1 öğrenci G/D istiyorum, artı 1 öğrenci sadece gidiş.<br /> +- Öğrencide G/D farkı yok cevabı geliyor.<br /> +- Biliyorum, tam da var onun için söylüyorum. (Bilgi: Tam bileti G/D alırsanız<br /> +öğrenci bileti ile aynı fiyat, garip. G/D alacaksanız öğrenciliğiniz işe<br /> +yaramıyor. Yani pasoya gerek yok. Tespit: Öğrenciler hep tek yön seyahat<br /> +eder.)<br /> +- Kredi kartı mı, peşin mi?<br /> +- DIINN ! kredi kartı.. var dimi?<br /> +- Evet, 112 TL<br /> +- Buyrun, zııttt pıırtt iki tak tak bi laklak biletler ve pos slipi elimde.</p> +<p>Gişenin önünden ayrılmadan biletleri tren, tarih, yer vs. doğru mu diye<br /> +kontrol ediyorum. Elimde biletler teşekkür edip ayrılırken, 1,5 saatte ancak<br /> +bir alışveriş yapmış oluyorum. Daha bir de geri dönüş yolu var.</p> +<p>Velhasıl,<br /> +Gidiş : 18/06/2009 Perşembe 23:30 Haydarpaşa Vagon:X Koltuk: XX-XX-XX<br /> +Gidiş : 20/06/2009 Cumartesi 23:30 Ankara Vagon:X Koltuk: XX-XX</p> +<p>Hayırlı yolculuklar.</p> +<p>=====================<br /> +Dipnot-1: Bu yerleri aldığım 1. vagon haricinde 2 vagon tamamen boş görünüyor<br /> +daha. 2-3 nolarda satılan yerler var.</p> +<p>Dipnot-2: Ben telefonla iş yapmaya alışamamış biri olarak, rezervasyon veya<br /> +satış işlemi var mı diye hiç peşine düşmedim. Orada da farklı bir macera sizi<br /> +bekliyor olabilir, kim bilir?</p> +<p>Dipnot-3: Yataklı vagonlarda alt-üst yatak seçme şansınız olabilir mi sizce?</p> + Wed, 17 Jun 2009 21:33:17 +0000 + + + Hakan Uygun: IE, WTW ve Gıda Yardımı + http://www.hakanuygun.com/blog/?p=248 + http://www.hakanuygun.com/blog/?p=248 + +<p><a href="http://walktheweb.wfp.org/" target="_blank"><img class="aligncenter size-full wp-image-252" title="wfp-wtw" src="http://www.hakanuygun.com/blog/wp-content/uploads/2009/06/wfp-wtw.png" alt="wfp-wtw" width="512" height="240" /></a>Bugünlerde dünya üzerindeki açlık, gıda yardımları ve bunlara ait haberler her zamankinden daha fazla ilgimi çekiyor. Dolayısıyla Microsoft&#8217;un yeni kampanyası ilgimi çekti. Microsoft İnternet Tarayıcısının yeni sürümünü daha iyi duyurabilmek için gıda yardımı üzerine kurulu bir kampanya başlatmış. IE8&#8242;in her tam indirilmesine karşılık 8 öğün yemek bağışında bulunacakmış. Detaylara <a href="http://www.browserforthebetter.com/download.html" target="_blank">buradan</a> ulaşabilirsiniz&#8230;</p> +<p>Bu konu ile ilgili de bir dolu tartışma gündeme geldi tabii ki, örneğin <a href="http://www.techcrunch.com/" target="_blank">TechCrunch</a>&#8216;da kampanyaya dair bir dolu yazı ve tartışma var. Ben kendi adıma Linux üzerinde zaten çalışmayan bu tarayıcıyı indirip biraz ağ zamanı harcayıp bağışta bulunsam mı, zaten IE kullananların hatalı çalışan eski sürümler yerine CSS ve JS ile ilgili bir dolu hatanın düzeltildiği bu yeni sürüme geçmelerini teşvik etsem mi, yoksa hiç sesimi çıkarmasam mı bilemedim.
Ardından da bu haberi bahane edip daha fazlası ile yazayım dedim.</p> +<p>İster IE8 indirin isterseniz aşağıdaki organizasyonların sitelerini ziyaret edip dünya üzerindeki açlık ve fakirlikle mücadeleye katkıda bulunmak için yapabileceklerinizi öğrenin&#8230; Bunların içerisinde özellikle Birleşmiş Milletler Dünya Gıda Programı&#8217;nın <a href="http://walktheweb.wfp.org/" target="_blank">Walk The Web</a> kampanyasına bir göz atmanızı öneririm&#8230;</p> +<ul> +<li><a href="http://www.wfp.org/" target="_blank">www.wfp.org</a></li> +<li><a href="http://www.actionagainsthunger.org/" target="_blank">www.actionagainsthunger.org</a></li> +<li><a href="http://www.hakanuygun.com/blog/www.makepovertyhistory.org" target="_blank">www.makepovertyhistory.org</a></li> +<li><a href="http://www.standagainstpoverty.org" target="_blank">www.standagainstpoverty.org</a></li> +<li><a href="http://www.engineersagainstpoverty.org" target="_blank">www.engineersagainstpoverty.org</a></li> +<li><a href="http://www.whiteband.org" target="_blank">www.whiteband.org</a></li> +</ul> +<p>Son olarak da bugünlerde herkese önerdiğim gibi <a href="http://www.facebook.com/ext/share.php?sid=107634228486&h=FwnnE&u=6crnv&ref=mf" target="_blank">Yuva</a> ( Home ) belgeselini izlemenizi şiddetle tavsiye ederim.</p> + Tue, 16 Jun 2009 11:38:02 +0000 + + + Hakan Uygun: TBD Bilişim Kongresi’nde Özgür Yazılım Paneli + http://www.hakanuygun.com/blog/?p=244 + http://www.hakanuygun.com/blog/?p=244 + +<p>TBD&#8217;nin bu yıl 3.sünü düzenlediği <a href="http://www.istanbulbilisimkongresi.org.tr/" target="_blank">İstanbul Bilişim Kongresi</a>&#8216;nde Pazar günü saat 14:00&#8242;de Özgür Yazılım Paneli olacaktır. Panel&#8217;de özgür yazılım ve iş modelleri üzerinde durulacaktır. İlgilenenlere duyurulur&#8230;</p> +<p><strong> Yer:</strong> Marmara Üniversitesi Nişantaşı Kampüsü<br /> +Erdal İnönü Bilim ve Kültür Merkezi<br /> +<strong>Tarih:</strong> 31 Mayıs Pazar, 14:00 - 15:20<br /> +<strong>Oturum başkanı:</strong> Görkem Çetin<br /> +<strong>Konuşmacılar:</strong> Enver Altın, Hakan Uygun, Cahit Cengizhan</p> + Thu, 28 May 2009 16:22:08 +0000 + + + Hakan Uygun: Sıralama Algoritmaları + http://www.hakanuygun.com/blog/?p=231 + http://www.hakanuygun.com/blog/?p=231 + +<p>Sıralama algoritmaları, programcılığa girişi oluşturan en temel şeylerdendir. Özellikle aynı problemin çözümü için farklı yöntemlerin nasıl farklı sonuçlar verdiğini görmek için şahane örneklerdir. Daha da iyisi bu farklı algoritmaların görsel olarak karşılaştırmasıdır. İşte tam da bu işi başarıyla yapan bu <a href="http://www.sorting-algorithms.com/" target="_blank">siteye</a> bakmanızı şiddetle tavsiye ederim. Sadece farklı algoritmaları görsel karşılaştırmasını değil, her algoritmanın farklı veri kümelerinde davranış biçimini ve detaylı karşılaştırmalarını da bulabilirsiniz&#8230;</p> + Mon, 13 Apr 2009 08:20:53 +0000 + + + + diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/www/sidebar.html b/DJAGEN/tags/djagen_old/djagen/gezegen/www/sidebar.html new file mode 100755 index 0000000..8709a87 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/gezegen/www/sidebar.html @@ -0,0 +1,86 @@ + diff --git a/DJAGEN/tags/djagen_old/djagen/gezegen/www/simple.html b/DJAGEN/tags/djagen_old/djagen/gezegen/www/simple.html new file mode 100755 index 0000000..c20bb0f --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/gezegen/www/simple.html @@ -0,0 +1,439 @@ + + + + + + + Linux Gezegeni + + + + + + + + + + + + + +

16 March 2010

+ +
+ + + +
+
+

Yakın Doğu’da Seminer Rüzgarları

+
+
+

Geçen haftadan beri Yakın Doğu Üniversitesi’nde Linux ve Özgür Yazılım seminerleri düzenliyoruz. İlk seminerimiz Linux Nedir? idi. Uzun zamandan beri dinlediğim en eğlenceli Linux Nedir’lerden birisi idi. 3.30 saat sürmesine rağmen konuşmacı Ali Erdinç Köroğlu’nun eğlenceli anlatımı ile zaman su gibi aktı denebilir.

+

Yakın Doğu’ya ilk geldiğim zamanlarda ben de bir Linux Nedir semineri vermiştim. O zamanki katılımcı durumu işte aşağıdaki gibi idi.

+

+

Aslında çoğunluğunu öğretmenlerin oluşturması gereken katılımcı kitlesini, öğrenciler oluşturuyor idi. 1.30 saatlik bir anlatım ile katılımcılara Linux Nedir anlatmıştım. Seminer süresince hiç soru gelmemişti. Seminerden sonra yanıma gelip soru soranlar da olunca, epey mutlu olmuştum.

+

+

Şimdiki durumda katılımcı sayısının azımsanmayacak kadar çok olması yanında, daha ilgili bir kalabalığın katıldığını düşünüyorum.

+

+

Ali Erdinç’in de epey eğlenceli bir anlatımı olduğunu, dinleyicinin dikkatini çekmek için onları arada dürttüğünü de belirtmek lazım.

+

+

Bu seminerler dizisi Mayıs ayına kadar devam edecek. Meraklısı için üniversite duyuru sayfası, Facebook ve Twitter‘dan takip edebileceklerini söyleyelim. Hatta Kıbrıs’ta olanlar için üniversitede her Cuma akşamları ve Cumartesi günleri sinema gösterimleri yaptığımızı da belirtelim. Biz izlerken eğleniyoruz. Bekleriz.

+

Lefkoşa’ya bahar geldi denebilir. Oğuz Yarımtepe Kıbrıs’tan bildirdi.

+

+
+ + + +
+ +
+

05 February 2010

+ +
+ + + +
+
+

100 ml

+
+
+

1 Ocak 2010 tarihinden itibaren uçuşlarda sıvı kısıtlaması uygulanıyor. El bagajlarında 100 ml’yi geçen sıvılara, jel, krem vb. el konuyor. Yılbaşında Kıbrıs’a dönerken başıma bir olay gelince ne menem bir şeydir diye araştırdığım bir konu oldu. Sırt çantamda 150 ml Dove krem var idi. Kremi epeydir kullanıyordum. Saat 5.30 gibi uçağa binmek için son kontrol noktasında bekliyorduk. Kontrol için güvenlik gelebilirsiniz dedikten sonra ben de çantayı cihaza bırakıp geçtim. Cihazın başındaki görevli bu çantada sıvı bir şey var diye seslendi bana. Ön gözde dedi. Evet krem var dedim. Açtı. Baktı 150 ml bir krem. 100 ml üzerini alamıyoruz dedi. Ben de uyku sersemi, o kullanılmış bir kutu, içindeki belki 100 ml’nin altındadır dedim. Görevli gülümsedi. O da görevini yapıyordu. Ayakta duran bayan sert bir şekilde kutuyu aldı, baktı, bizim için bunun hacmi önemli dedi. Açıkçası tartışmanın bir anlamı yoktu. Onlar da bir kuralı uyguluyorlardı. Elimle, söylendiği gibi, para verip aldığım kremi çöpe attım.

+

Şimdi olayın benim açımdan garip noktalarına gelelim

+

* Güvenlik görevlisi kutunun kaç ml olduğuna üzerine bakarak karar verdi. Yani bir dahaki sefere sırf sistemi denemek için, aynı kremden alıp, üzerindeki 150’yi 100 yaparsam geçer muhtemelen.

+

* Görevli içini açıp bakmadı bile. Bana sordu ne var diye. Yani içine şu epeydir ortalarda olmayan domuz gribi koysam dahi bilgisi olamazdı.

+

* Elimle çöpe attım, o çok koydu.

+

Ben de bunun üzerine Ulaştırma Bakanlığı’na bir e-posta attım. E-postam Sivil Havacılık Dairesine yönlendirilmiş ve bir yanıt geldi. Kısaca, bu 100 ml uygulaması 10 Ağustos 2006′da İngiltere’de ortaya çıkan terörist plan sonrasında sivil havacılık gündemine girmiş. 6 Kasım 2006′da çıkarılan 1546/2006 tüzüğü ile tüm AB üyesi ülkelerde ve İsviçre, İzlanda ve Norveç’te, ABD ve Kanada’da uygulanmaya başlamış. Türkiye de ECAC üyesi bir devlet olduğundan tavsiye kararına uyarak bu uygulamayı diğer devletlerdeki gibi aynı kurallar dahilinde uygulamaya başlamış. Neden 100 ml peki? Birleşmiş Milletler bünyesindeki Patlayıcılar Çalışma Grubunda yapılan araştırma, testler ve risk değerlendirmesi neticesinde sıvıların 100 ml’lik kaplarda 1 litrelik poşette taşınması halinde (1 lt’lik poşete yaklaşık 6 adet kap sığmaktaymış) uçuşun emniyetini tehlikeye düşürmeyeceği sonucuna varılmış. Bilim adamları araştırmış bulmuş, bir şey diyemeyeceğim bu konuda. Peki bizde 100 ml olduğu nasıl anlaşılıyor? Bayağı, ya size soruyorlar ya da üzerindeki yazıları okuyorlar. Yani ben teröristlik yapmak istesem, alırım 200 ml sıvı patlayıcı, koyarım Dove krem kutusuna, yazıları bir güzel 100 diye düzenlerim, sorarlarsa da 100 der geçerim. Peki biz neden illa 100 diye uyguluyoruz? E çünkü diğer ülkelerde de öyle de ondan. E-postadaki şu satırlara bakalım:

+

“Ülkemiz yukarıda adı geçen uluslarası kuruluşların aldığı kararları  ve berlilediği standartları uygulamakla yükümlüdür.”

+

Bu uygulamanın diğer ülkelerde hangi standartlarla yürütüldüğünü bilmiyorum. Belki de sadece Kıbrıs uçuşlarında bir acayiplik vardır. Standart denilen kavram sadece 100 sayısına bağlı bir şeydir diye de anlaşılıyor olabilir.

+

Siz siz olun, uçağa binerken el bagajınızda üzerinde 100 ml üzeri bir şey yazan herhangi bir kap bulundurmayın. İçi boş ya da dolu, fark etmez.

+
+ + + +
+ +
+

29 January 2010

+ +
+ + + +
+
+

Artık Sun yok!

+
+
+

iPad haberleri arasında kaybolup gidiyor ama Oracle uzun süren Sun’ı satın alma işlemini bitirdi. Artık www.sun.com adresine girdiğinizde sizi doğrudan Oracle sitesine yönlendiriyor.

+

Beni en çok ilgilendiren konu ise Sun’ın özgür yazılım projelerine devam edilip edilmeyeceği; bu konuda şimdilik olumlu haberler geliyor. Bütün bu projeler içerisinde devam edilmeyeceği açıklanan tek proje şimdilik Kenai.

+

Umarım hepimiz için mutlu son olur…

+

Ek: Kültür Mantarı‘nın yönlendirmesi ile James Gosling’in bu konu ile ilgili blogunu gördüm ve oradaki görselin de burada saklanmasının iyi olacağını düşünüp buraya kopyaladım…

+

sunrip

+ + + +
+ +
+

24 December 2009

+ +
+ + + +
+
+

EMO 13. Ulusal Kongresi

+
+
+

EMO’nun 23-26 Aralık’ta ODTÜ’de gerçekleşecek olan 13. Ulusal Kongresi kapsamında 25 Aralık Cuma günü 9:30-11:15 arasında Özgür Yazılım başlıklı özel oturumda “Özgür Yazılımların Uygulama Geliştirme Modeline Etkisi; Tekir’den Öğrendiklerimiz” ve 11:30-12:30 arasında da “Özgür Yazılımın Ekonomik ve Sosyal Yönleri” sunumlarını yapıyorum.

+

Genel olarak yüklü bir programı olan bu etkinlikte çeşitli LKD seminerleri de olacak. Buyrunuz geliniz!

+ + + +
+ +
+

24 September 2009

+ +
+ + + +
+
+

Intel, Atom, Moblin

+
+
+

Intel, Atom işlemcileri ile hayatın her yerinde yer almak istiyor. x86 tabanlı Atom işlemciler, programcılara normal bilgisayarlar için yazılmış uygulamalarını çok fazla değişikliğe uğratmaya gerek kalmadan mobil cihazlarda çalıştırabilme olanağı sağlıyor. Bu da Intel’e önemli avantajlar sağlıyor. Bu avantajını daha da arttırmak için cihazlar üzerinde performansı arttıracak işletim sistemi için de kolları sıvayıp Moblin’i geliştirmeye başlamışlardı. Dün bu konular üzerine Intel’den üç önemli açıklama oldu…

+

Atom işlemcili cihazlarda uygulama performansını arttırmak için yeni bir geliştirici programı başlattılar. Atom Developer Program‘ı teşvik etmek için de bir yarışma düzenlediler. Bence bir göz atmakta fayda var… ( Ben kayıt olacağım :) )

+

İkinci ve üçüncü açıklamalar ise bir arada geldi, Moblin’in yeni sürümü 2.1 yayınlandı ve Atom işlemcili bir akıllı telefon üzerinde sunuldu. Intel bir çırpıda bir dolu firmaya rakip oldu :) Geçenlerde de yazmıştım,  önümüzdeki yıl içerisinde mobil dünyada bir dolu ilginç gelişmeler bekliyorum. Umarım bu rekabetten özgür yazılım ve biz kullanıcılar kazançlı çıkarız…

+ + + +
+ +
+

25 August 2009

+ +
+ + + +
+
+

Teknik Destek Kopya Kağıtı

+
+
+

xkcd’de geçen gün yayınlanan bu teknik destek kopya kağıdını pek beğendim ve Türkçe bir sürümünü yapayım dedim.

+

teknikdestek
+İsteyenler için ODF hali de burada

+ + + +
+ +
+

18 August 2009

+ +
+ + + +
+
+

Korsan Değil “Fikir Hırsızı”

+
+
+

Kültür ve Turizm Bakanlığı, Fikir ve Sanat Eserleri Kanunu’nda değişiklik yapılarak İnternet üzerinden müzik, film, kitap ve benzeri şeyleri indirenlerin de ceza almasını sağlamak için çalışma başlatmış. Bu denetimi yapabilmek için de internet servis sağlayıcıları ile birlikte çalışacaklarmış.

+

Her köşe başında kurulan tezgahlarda kitap, cd, dvd satan arkadaşlar hiçbir sorun ile karşılaşmadan bu işi yaparken, bunun için bildiğim kadarıyla yasal düzenlemeler zaten var, onlarla mücadele etmek yerine, internetten akan trafiği denetleyecekler. Bu denetim sırasında da müzik mi yoksa sevgilinizden gelen e-postanızı mı indirdiğiniz fark etmeyecek, dinleyecekler. Ayrıca indirdiğiniz müziğin yasal mı yoksa yasa dışı mı olduğunu bir çırpıda anlayacaklar. Bu arada, haberden eğer yanlış okumadıysam, yapılan operasyonda makinenizde çıkan parça sayısı kadar da görevlilere prim verilecek :) Yani büyük birader izlemeye ve istediğinde yasaklamaya olanak sağlayacak yeni yollar arıyor…

+

Bütün bunlardan fikir haklarına saygı duymadığım anlaşılmasın; tam tersine korsana, fikir hırsızlığına kesinlikle karşıyım. Fakat bütün bunlar bahane edilerek kişisel iletişimin ihlal edilmesine daha çok karşıyım.

+

Son olarak bir haber daha verelim: Pirate Bay’in 23 GB’lik arşivi de paylaşıma açılmış. Bu arşiv içerisinde yasal olmayan şeyler olabilir ama yasal olarak paylaşılan da birçok eser var. Sizler yasal olanlarını indirin :) Korsan değil özgür yazılım kullanın!

+ + + +
+ +
+

07 July 2009

+ +
+ + + +
+
+

Mobil Cihazlar Dünyasında Neler Oluyor?

+
+
+

Bir süredir mobil cihazlar dünyası hareketlenmiş durumda. Apple iPhone ile birlikte mobil telefon dünyasında ciddi bir hareketlenme olmuştu. Palm, Nokia, Sony Ericsson, BlackBerry gibi sektörün önde gelen firmalarına Apple ciddi bir rakip olarak ortaya çıkmış, ardından da Google Android ile bir platform olarak henüz yeterince destekleyen donanım olmasa bile vaat ettikleri ile dikkatleri üzerine çekmişti. Android, WebOS ve iPhone OS‘a karşı Symbian‘ı savunmaya devam eden Nokia, elinde olmayan hisselerini de alarak, bir vakıf kurup Symbian’ı açık kaynak kodlu olarak bu vakfa devretmişti.

+

Tam da bu esnada Intel, Atom işlemcisi ile düşük kaynak kullanan PC’lerin geliştirilmesine olanak sağladı ve NetBook’lar geçtiğimiz yıl içinde popüler cihazlar arasına girdi.

+

Bu yıl ise Intel, Mobile Internet Device ( MID - Mobil Internet Aracı ) üzerine ciddi yatırımlar yapmaya başladı. Hatta bu cihazların cazibesini arttırmak için özel bir Linux dağıtımına bile başladı: Moblin.

+

Moblin’e destek konusunda Intel önce Canonical ile anlaşmıştı. Daha sonra Canonical NetBook dağıtımı olarak Nokia’nın kendi tabletlerinde kullanmak amacıyla ürettiği Maemo‘yu desteklemeye karar verdiğini açıkladı. Intel de Moblin’i Linux Vakfı’na devrettiğini ve destek konusunda da Novell’le anlaştığını ilan etti. İki hafta önce detayları belirtilmeyen bir Nokia - Intel anlaşması ilan edildi. Genel olarak yorumlanan ise Nokia’nın daha becerikli telefonlar üretmek için Intel teknolojilerini kullanacağı, bu arada da Moblin ile Maemo arasında bir seçim yapıp güçlerini birleştirecekleri yönündeydi. Bugün Nokia, Android temelli telefonlar üretmeyeceğini ve GTK+ temelli olan Maemo’yu Qt’ye taşıyacağını ilan etti.

+

İşte benim sorularımın temelini de burası oluşturuyor. Qt temelli bir Maemo’yu Canonical desteklemeye devam edecek mi? Nokia Intel işlemcili MID’ler üretip bunlarda Maemo’yu mu koşturacak yoksa Intel işlemcili telefonlar üretip Symbian’ı rakipleri kadar becerikli yepyeni bir hale mi dönüştürecek? Intel MID’ler konusunda neler planlıyor? Bu planları içerisinde Moblin’i desteklemeye devam etmek var mı yoksa Nokia ile birlikte Maemo’ya yatırım mı yapacaklar? NetBook’larda da kullanılmaya başlayan Android de bu üretilecek donanımlar için bir alternatif olacak mı?

+

Hepsinden önemlisi bütün bunların sonucunda ortaya çıkacak olan, biz tüketiciler için ucuz ve becerikli donanımlar mı yoksa birbiri ile uyumsuz bir dolu daha oyuncak mı?

+ + + +
+ +
+

17 June 2009

+ +
+ + + +
+
+

LKD Genel Kurulu için Ankara’ya

+
+
+

Bu hafta sonu gerçekleştirilecek LKD Genel Kurulu için Ankara’ya gidiyoruz. Aşağıdaki yazı bu yolculuk için organizasyon yapmaya çalışan Volkan’dan…

+

***

+

Ankara yerine Bağdat’a gitsem bu kadar koştururdum herhalde,

+

TCDD: en teknolojik YHT çalıştıran, 5 saat 28 dk Ankara’ya ulaştıran koskoca
+kurum.
+Evet bu kurum maalesef bilet satmak istemiyor.

+

1- web sitesi Windows ve Internet Explorer bağımlısı. Öncelikle böyle bir
+sisteme sahip olmanız gerekli. (MAC ve Linux kullanıcıları tren yolcuları
+portföyünde yer almıyor. Onlar uçak veya otobüs severler!)

+

2- web sitesindeki bilet satış uygulaması banka sıra makinelerinin bir
+türevi sadece. Sıradan boş koltuk veriyor. Pulman vagonlarında ilk 6 koltuk
+karşılıklı bakar durumda, son 3 koltuk da geriye yatamaz durumda. Bilin
+bakalım verdiği ilk koltuklar nereleri? Evet bildiniz bunlar. Farklı bir
+koltuk veya vagon seçemiyorsunuz. Seçilebilecek şeyler bayan yanı ve
+internet. Onlarda da ne kadar gerçek seçimlere izin verildiği şüpheli.
+(İnternet olsun dedim, sonuç yok dedi.)

+

3- PTT şubeleri tren bilet satacak acenteler olarak duyuruluyor. Gidiyorsunuz,
+veee… Evet, biz satıyoruz, ama siteye girebilirsek diyorlar. Ne oldu, tabii
+ki giremediler. 10 dk sıra beklediğiniz için teşekkür ederiz.

+

4- Acente, komisyon karşılığı TCDD bileti satanlar. Gidiyoruz birine, bize
+bilet lazım satabiliyor musunuz? diye soruyorum. Tabii buyrun diyorlar. Gidiş
+dönüş 1 tam 1 öğrenci istiyorum. Satıcı önce
+- G/D kesmiyorum diyor buradan!
+- Nasıl yani?
+- Fark yok zaten, ayrı ayrı keseyim. Fiyatı farklı mı ki?
+Başka bir arkadaşı düzeltiyor, aynı express olursa kesebilirsin.
+- Elbette G/D niye alayım indirim var diyorum.
+Neyse girip deniyor, gelen koltuk numaralarını soruyorum.
+- 4,5 diyor. (İlk altı koltuk içinden boş olanlar)
+- Değiştiremiyor musunuz?
+- Maalesef.
+- Internet sürümüne mi giriyorsunuz diyorum ister istemez.
+- Hayır biz acente olarak giriyoruz ama fark yok, cevabı geliyor. (Tahminen
+üzerine ek komisyon ekleniyor sadece.)
+- Kim koltuk seçtiriyor bana ?
+- Gardan alabilirsiniz, Haydarpaşa veya Sirkeci.

+

5- Rotamız Sirkeci garı. Bir otobüs ve tramvay ile ulaşıyorum.
+Bende dil yandı ya, ilk soru Fatih expresine bilet istiyorum, ama koltuk
+seçebiliyor musunuz?
+- Bakalım yer boş olursa seçebiliriz diyor satıcı bu kez.
+- Ohh nihayet.
+- 1 tam 1 öğrenci G/D istiyorum, artı 1 öğrenci sadece gidiş.
+- Öğrencide G/D farkı yok cevabı geliyor.
+- Biliyorum, tam da var onun için söylüyorum. (Bilgi: Tam bileti G/D alırsanız
+öğrenci bileti ile aynı fiyat, garip. G/D alacaksanız öğrenciliğiniz işe
+yaramıyor. Yani pasoya gerek yok. Tespit: Öğrenciler hep tek yön seyahat
+eder.)
+- Kredi kartı mı, peşin mi?
+- DIINN ! kredi kartı.. var dimi?
+- Evet, 112 TL
+- Buyrun, zııttt pıırtt iki tak tak bi laklak biletler ve pos slipi elimde.

+

Gişenin önünden ayrılmadan biletleri tren, tarih, yer vs. doğru mu diye
+kontrol ediyorum. Elimde biletler teşekkür edip ayrılırken, 1,5 saatte ancak
+bir alışveriş yapmış oluyorum. Daha bir de geri dönüş yolu var.

+

Velhasıl,
+Gidiş : 18/06/2009 Perşembe 23:30 Haydarpaşa Vagon:X Koltuk: XX-XX-XX
+Gidiş : 20/06/2009 Cumartesi 23:30 Ankara Vagon:X Koltuk: XX-XX

+

Hayırlı yolculuklar.

+

=====================
+Dipnot-1: Bu yerleri aldığım 1. vagon haricinde 2 vagon tamamen boş görünüyor
+daha. 2-3 nolarda satılan yerler var.

+

Dipnot-2: Ben telefonla iş yapmaya alışamamış biri olarak, rezervasyon veya
+satış işlemi var mı diye hiç peşine düşmedim. Orada da farklı bir macera sizi
+bekliyor olabilir, kim bilir?

+

Dipnot-3: Yataklı vagonlarda alt-üst yatak seçme şansınız olabilir mi sizce?

+ + + +
+ +
+

16 June 2009

+ +
+ + + +
+
+

IE, WTW ve Gıda Yardımı

+
+
+

Bugünlerde dünya üzerindeki açlık, gıda yardımları ve bunlara ait haberler her zamankinden daha fazla ilgimi çekiyor. Dolayısıyla Microsoft’un yeni kampanyası ilgimi çekti. Microsoft İnternet Tarayıcısının yeni sürümünü daha iyi duyurabilmek için gıda yardımı üzerine kurulu bir kampanya başlatmış. IE8′in her tam indirilmesine karşılık 8 öğün yemek bağışında bulunacakmış. Detaylara buradan ulaşabilirsiniz…

+

Bu konu ile ilgili de bir dolu tartışma gündeme geldi tabii ki, örneğin TechCrunch‘da kampanyaya dair bir dolu yazı ve tartışma var. Ben kendi adıma Linux üzerinde zaten çalışmayan bu tarayıcıyı indirip biraz ağ zamanı harcayıp bağışta bulunsam mı, zaten IE kullananların hatalı çalışan eski sürümler yerine CSS ve JS ile ilgili bir dolu hatanın düzeltildiği bu yeni sürüme geçmelerini teşvik etsem mi, yoksa hiç sesimi çıkarmasam mı bilemedim. Ardından da bu haberi bahane edip daha fazlası ile yazayım dedim.

+

İster IE8 indirin isterseniz aşağıdaki organizasyonların sitelerini ziyaret edip dünya üzerindeki açlık ve fakirlikle mücadeleye katkıda bulunmak için yapabileceklerinizi öğrenin… Bunların içerisinde özellikle Birleşmiş Milletler Dünya Gıda Programı’nın Walk The Web kampanyasına bir göz atmanızı öneririm…

+ +

Son olarak da bugünlerde herkese önerdiğim gibi Yuva ( Home ) belgeselini izlemenizi şiddetle tavsiye ederim.

+ + + +
+ +
+

28 May 2009

+ +
+ + + +
+
+

TBD Bilişim Kongresi’nde Özgür Yazılım Paneli

+
+
+

TBD’nin bu yıl üçüncüsünü düzenlediği İstanbul Bilişim Kongresi‘nde Pazar günü saat 14:00′de Özgür Yazılım Paneli olacaktır. Panel’de özgür yazılım ve iş modelleri üzerinde durulacaktır. İlgilenenlere duyurulur…

+

Yer: Marmara Üniversitesi Nişantaşı Kampüsü
+Erdal İnönü Bilim ve Kültür Merkezi
+Tarih: 31 Mayıs Pazar, 14:00 - 15:20
+Oturum başkanı: Görkem Çetin
+Konuşmacılar: Enver Altın, Hakan Uygun, Cahit Cengizhan

+ + + +
+ +
+

13 April 2009

+ +
+ + + +
+
+

Sıralama Algoritmaları

+
+
+

Sıralama algoritmaları, programcılığa girişi oluşturan en temel şeylerdendir. Özellikle aynı problemin çözümü için farklı yöntemlerin nasıl farklı sonuçlar verdiğini görmek için şahane örneklerdir. Daha da iyisi bu farklı algoritmaların görsel olarak karşılaştırmasıdır. İşte tam da bu işi başarıyla yapan bu siteye bakmanızı şiddetle tavsiye ederim. Sadece farklı algoritmaları görsel karşılaştırmasını değil, her algoritmanın farklı veri kümelerinde davranış biçimini ve detaylı karşılaştırmalarını da bulabilirsiniz…

+ + + +
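Sitedeki görselleştirmenin anlattığını küçük bir deneyle kendiniz de görebilirsiniz. Aşağıdaki Python taslağı siteden alınmış değil; yalnızca fikri göstermek için yazılmış varsayımsal bir örnek: iki algoritmanın farklı veri kümelerinde yaptığı karşılaştırma sayısını ölçüyor.

```python
# Varsayımsal örnek: aynı problemi çözen iki sıralama algoritmasının,
# girdinin biçimine göre ne kadar farklı davrandığını karşılaştırma
# sayısı üzerinden gösterir.
import random

def insertion_sort(a, sayac):
    a = list(a)
    for i in range(1, len(a)):
        j = i
        while j > 0:
            sayac[0] += 1  # her eleman karşılaştırmasını say
            if a[j - 1] > a[j]:
                a[j - 1], a[j] = a[j], a[j - 1]
                j -= 1
            else:
                break  # sıralı girdide erken çıkar (uyarlanabilir davranış)
    return a

def merge_sort(a, sayac):
    if len(a) <= 1:
        return list(a)
    orta = len(a) // 2
    sol = merge_sort(a[:orta], sayac)
    sag = merge_sort(a[orta:], sayac)
    sonuc, i, j = [], 0, 0
    while i < len(sol) and j < len(sag):
        sayac[0] += 1  # birleştirme sırasındaki karşılaştırmalar
        if sol[i] <= sag[j]:
            sonuc.append(sol[i]); i += 1
        else:
            sonuc.append(sag[j]); j += 1
    sonuc.extend(sol[i:]); sonuc.extend(sag[j:])
    return sonuc

veriler = {
    "rastgele": random.sample(range(1000), 200),
    "sirali": list(range(200)),
    "ters": list(range(200, 0, -1)),
}
for ad, veri in veriler.items():
    for algoritma in (insertion_sort, merge_sort):
        sayac = [0]
        assert algoritma(veri, sayac) == sorted(veri)
        print("%-15s %-9s %6d karsilastirma" % (algoritma.__name__, ad, sayac[0]))
```

Sıralı girdide insertion sort neredeyse hiç iş yapmazken ters sıralı girdide karşılaştırma sayısı karesel büyür; merge sort ise üç durumda da yaklaşık n·log n karşılaştırmada kalır.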
+ +
+
+ + + + + + + + diff --git a/DJAGEN/tags/djagen_old/djagen/manage.py b/DJAGEN/tags/djagen_old/djagen/manage.py new file mode 100755 index 0000000..5e78ea9 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/manage.py @@ -0,0 +1,11 @@ +#!/usr/bin/env python +from django.core.management import execute_manager +try: + import settings # Assumed to be in the same directory. +except ImportError: + import sys + sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__) + sys.exit(1) + +if __name__ == "__main__": + execute_manager(settings) diff --git a/DJAGEN/tags/djagen_old/djagen/settings.py b/DJAGEN/tags/djagen_old/djagen/settings.py new file mode 100755 index 0000000..022b4f4 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/settings.py @@ -0,0 +1,92 @@ +# Django settings for djagen project. +import os +BASEPATH = '/home/cad/Workspace/djagen_ws/gezegen/branches/mustafa_branch' + +DEBUG = True +TEMPLATE_DEBUG = DEBUG + +ADMINS = ( + # ('Your Name', 'your_email@domain.com'), +) + +MANAGERS = ADMINS + +DATABASE_ENGINE = 'mysql' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'. +DATABASE_NAME = 'djagendb' # Or path to database file if using sqlite3. +DATABASE_USER = 'root' # Not used with sqlite3. +DATABASE_PASSWORD = '1234' # Not used with sqlite3. +DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3. +DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3. + +# Local time zone for this installation. Choices can be found here: +# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name +# although not all choices may be available on all operating systems. +# If running in a Windows environment this must be set to the same as your +# system time zone. +TIME_ZONE = 'Europe/Istanbul' + +# Language code for this installation. All choices can be found here: +# http://www.i18nguy.com/unicode/language-identifiers.html +LANGUAGE_CODE = 'tr' + +SITE_ID = 1 + +# If you set this to False, Django will make some optimizations so as not +# to load the internationalization machinery. +USE_I18N = True + +# Absolute path to the directory that holds media. +# Example: "/home/media/media.lawrence.com/" +MEDIA_ROOT = '/var/www/localhost/htdocs/djagen_media/' + +# URL that handles the media served from MEDIA_ROOT. Make sure to use a +# trailing slash if there is a path component (optional in other cases). +# Examples: "http://media.lawrence.com", "http://example.com/media/" +MEDIA_URL = 'http://django.localhost.in/' + +# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a +# trailing slash. +# Examples: "http://foo.com/media/", "/media/". +ADMIN_MEDIA_PREFIX = '/admin_media/' + +# Make this unique, and don't share it with anybody. +SECRET_KEY = '^w^o-$s4#k$&+xa5o$mi(cnz+21xu^+&!e-wib+&vn*w)3+=tc' + +# List of callables that know how to import templates from various sources. 
+TEMPLATE_LOADERS = ( + 'django.template.loaders.filesystem.load_template_source', + 'django.template.loaders.app_directories.load_template_source', + 'django.template.loaders.eggs.load_template_source', +) + +MIDDLEWARE_CLASSES = ( + 'django.middleware.common.CommonMiddleware', + 'django.contrib.csrf.middleware.CsrfMiddleware', + 'django.contrib.sessions.middleware.SessionMiddleware', + 'django.contrib.auth.middleware.AuthenticationMiddleware', +) + +ROOT_URLCONF = 'djagen.urls' + + + +TEMPLATE_DIRS = ( + # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates". + # Always use forward slashes, even on Windows. + # Don't forget to use absolute paths, not relative paths. + #os.path.join(BASEPATH, '/templates'), + os.path.join(BASEPATH,'templates/'), +) + +INSTALLED_APPS = ( + 'django.contrib.auth', + 'django.contrib.contenttypes', + 'django.contrib.sessions', + 'django.contrib.sites', + 'djagen.collector', + 'djagen.captcha', + 'django.contrib.admin', +) + +FILE_UPLOAD_TEMP_DIR = os.path.join(BASEPATH, 'templates/tmp') +MAIN_PATH = os.path.join(BASEPATH, 'gezegen') diff --git a/DJAGEN/tags/djagen_old/djagen/templates/1.html b/DJAGEN/tags/djagen_old/djagen/templates/1.html new file mode 100755 index 0000000..06ffbcf --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/templates/1.html @@ -0,0 +1,7 @@ + +{% for entry in entries_list %} + +{% ifchanged %}
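{# Tarih başlığı yalnızca bir önceki girdiye göre değiştiğinde basılır; böylece girdiler gün gün gruplanır. #}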

{{ entry.date|date:"d F Y" }}

{% endifchanged %} +
  • {{ entry.title }}
  • +{{ entry.content_html }} +{% endfor %} \ No newline at end of file diff --git a/DJAGEN/tags/djagen_old/djagen/templates/archive.tmpl b/DJAGEN/tags/djagen_old/djagen/templates/archive.tmpl new file mode 100755 index 0000000..e8e2cfd --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/templates/archive.tmpl @@ -0,0 +1,397 @@ + + + + Linux Gezegeni + + + + + + + + + + + + + + + +
    + + +
    + + + +
    + {% if error %} + + Bir hata oluşu! + {% endif %} +

    {{ archive_year }} yılı {% if archive_month %} {{ archive_month }}. ay, {% endif %}gezegen arşivi:

    + {% ifnotequal p_entries_list.paginator.num_pages 1 %} + +{% comment %} + +{% endcomment %} +{% endifnotequal %} + +
    + + {% if not entries_list %} + + Gösterecek veri yok! +


    + {% endif %} + + {% for entry in p_entries_list.object_list|slice:items_per_page %} + {% autoescape off %} + + {% ifequal entry.entry_id.is_approved 1 %} + + {% ifchanged %}

    {{ entry.date|date:"d F Y" }}

    {% endifchanged %} + + +
    + + +
    +
    +
    +

    + {{ entry.title }} +

    +
    +
    +
    + + + {{ entry.content_html|truncatewords_html:truncate_words }} + +
    +
    +
    + + + + + + + + + +
    +
    + +
    +
    +
    + {% endifequal %} + + {% endautoescape %} + + {% endfor %} + + + {% ifnotequal p_entries_list.paginator.num_pages 1 %} + +
    +{% endifnotequal %} + + + + + + + + + + + diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main.tmpl b/DJAGEN/tags/djagen_old/djagen/templates/main.tmpl new file mode 100755 index 0000000..d0b6a38 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/templates/main.tmpl @@ -0,0 +1,326 @@ + + + + Linux Gezegeni + + + + + + + + + + + + + + + +
    + + +
    + + +
    + + {% for entry in entries_list|slice:items_per_page %} + {% autoescape off %} + + {% ifequal entry.entry_id.is_approved 1 %} + + {% ifchanged %}

    {{ entry.date|date:"d F Y" }}

    {% endifchanged %} + + +
    + + +
    +
    +
    +

    + {{ entry.title }} +

    +
    +
    +
    + + + {{ entry.content_html|truncatewords_html:truncate_words }} + +
    +
    +
    + + + + + + + + + +
    +
    + +
    +
    +
    + {% endifequal %} + + {% endautoescape %} + + {% endfor %} + + + + + + + + + + + + + + diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main/atom.xml b/DJAGEN/tags/djagen_old/djagen/templates/main/atom.xml new file mode 100755 index 0000000..1926d18 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/templates/main/atom.xml @@ -0,0 +1,347 @@ + + + + Linux Gezegeni + + + http://gezegen.linux.org.tr/rss20.xml + 2010-04-11T23:16:31+00:00 + Planet/2.0 +http://www.planetplanet.org + + + Yakın Doğu’da Seminer Rüzgarları + + http://www.loopbacking.info/blog/?p=98 + 2010-03-16T17:40:18+00:00 + <p>Geçen haftadan beri Yakın Doğu Üniversitesi&#8217;nde Linux ve Özgür Yazılım seminerleri düzenliyoruz. İlk seminerimiz Linux Nedir? idi. Uzun zamandan beri dinlediğin en eğlenceli Linux Nedir&#8217;lerden birisi idi. 3.30 saat sürmesine rağmen konuşmacı Ali Erdinç Köroğlu&#8217;nun eğlenceli anlatımı ile zaman su gibi aktı denebilir.</p> +<p>Yakın Doğu&#8217;ya ilk geldiğim zamanlarda ben de bir Linux Nedir semineri vermiştim. O zamanki katılımcı durumu işte aşağıdaki gibi idi.</p> +<p><a href="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0697.jpg"><img class="alignnone size-medium wp-image-99" title="Linux Nedir Semineri" src="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0697-300x199.jpg" alt="" width="300" height="199" /></a></p> +<p>Aslında çoğunluğunu öğretmenlerin oluşturması gereken katılımcı kitlesini, öğrenciler oluşturuyor idi. 1.30 saatlik bir anlatım ile katılımcılara Linux Nedir anlatmıştım. Seminer süresince hiç soru gelmemişti. Seminerden sonra yanıma gelip soru soranlar da olunca, epey mutlu olmuştum.</p> +<p><a href="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0704.jpg"><img class="alignnone size-medium wp-image-100" title="Linux Nedir Seminer" src="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0704-300x199.jpg" alt="" width="300" height="199" /></a></p> +<p>Şimdiki durumda katılımcı sayısı azımsanmayacak kadar olması yanında, daha ilgili bir kalabalığın katıldığını düşünüyorum.</p> +<p><a href="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0757.jpg"><img class="alignnone size-medium wp-image-101" title="YDU AEK Internet'in Yapı Taşları Semineri" src="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0757-300x199.jpg" alt="" width="300" height="199" /></a></p> +<p>Ali Erdinc&#8217;in de epey bir eğlenceli anlatımı olduğunu, dinleyicinin dikkatini çekmek için onları arada dürttüğünü de belirtmek lazım.</p> +<p><a href="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0759.jpg"><img class="alignnone size-medium wp-image-102" title="Internet'in Yapı Taşları" src="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0759-300x199.jpg" alt="" width="300" height="199" /></a></p> +<p>Bu seminerler dizisi Mayıs ayına kadar devam edecek. Meraklısı için üniversite <a href="http://duyuru.neu.edu.tr">duyuru sayfası</a>, <a href="http://www.facebook.com/NearEastUniversity">Facebook</a> ve <a href="http://twitter.com/NearEastUniv">Twitter</a>&#8216;dan takip edebileceklerini söyleyelim. Hatta Kıbrıs&#8217;ta olanlar için üniversitede her Cuma akşamları ve Cumartesi günleri sinema gösterimleri yaptığımızı da belirtelim. Biz izlerken eğleniyoruz. Bekleriz.</p> +<p>Lefkoşa&#8217;ya bahar geldi denebilir. 
Oğuz Yarımtepe Kıbrıs&#8217;tan bildirdi.</p> +<p><a href="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0770.jpg"><img class="alignnone size-medium wp-image-103" title="Inovasyon Merkezi, tarla" src="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0770-300x199.jpg" alt="" width="300" height="199" /></a></p> +<img src="http://feeds.feedburner.com/~r/oguzy-gezegen/~4/dmDtp8fRToI" height="1" width="1" /> + + Oğuz Yarımtepe + http://www.loopbacking.info/blog + + + import me » Gezegen + There is no life here + + 2 + 2010-04-11T17:01:22+00:00 + + + + + 100 ml + + http://www.loopbacking.info/blog/?p=95 + 2010-02-05T12:19:21+00:00 + <p>1 Ocak 2010 tarihinden itibaren uçuşlarda sıvı kısıtlaması uygulanıyor. El bagajlarında 100 mlyi geçen sıvılara, jel, krem vb. el konuyor. Yılbaşında Kıbrıs&#8217;a dönerken başıma bir olay gelince ne menem bir şeydir diye araştırdığım bir konu oldu. Sırt çantamda 150 ml Dove krem var idi. Kremi epeydir kullanıyordum. Saat 5.30 gibi uçağa binmek için son kontrol noktasında bekliyorduk. Kontrol için güvenlik gelebilirsiniz dedikten sonra ben de  çantayı cihaza bırakıp geçtim. Cihazın başındaki görevli bu çantada sıvı bir şey var diye seslendi bana. Ön gözde dedi. Evet krem var dedim. Açtı. Baktı 150 ml bir krem. 100 ml üzerini alamıyoruz dedi. Ben de uyku sersemi, o kullanılmış bir kutu, içindeki belki 100 mlnin altındadır dedim. Görevli gülümsedi. O da görevini yapıyordu. Ayakta duran bayan sert bir şekilde kutuyu aldı, baktı, bizim için bunu hacmi önemli dedi. Açıkcası tartışmanın bir anlamı yoktu. Onlar da bir kuralı uyguluyorlardı. Elimle söylendiği gibi para verip aldığım kremi çöpe attım.</p> +<p>Şimdi olayın benim açımdan garip noktalarına gelelim</p> +<p>* Güvenlik görevlisi kutunun kaç ml olduğuna üzerine bakarak karar verdi. Yani bir dahaki sefere sırf sistemi denemek için, aynı kremden alıp, üzerindeki 150 yi 100 yaparsam geçer muhtemelen.</p> +<p>* Görevli içine açıp bakmadı bile. Bana sordu ne var diye. Yani içine şu epeydir ortalarda olmayan domuz gribi koysam dahi bilgisi olamazdı.</p> +<p>* Elimle çöpe attım, o çok koydu.</p> +<p>Ben de bunun üzerine Ulaştırma Bakanlığı&#8217;na bir eposta attım. Epostam Sivil Havacılık Dairesine yönlendirilmiş ve bir yanıt geldi. Kısaca, bu 100 ml uygulaması 10 Ağustos 2006&#8242;da İngiltere&#8217;de ortaya çıkan terörist plan sonrasında sivil havacılık gündemine girmiş. 6 Kasım 2006&#8242;da çıkarılan 1546/2006 tüzüğü ile tüm AB üyesi ülkelerde ve İsviçre, İzlanda ve Norveç&#8217;te, ABD ve Kanada&#8217;da uygulanmaya başlamış. Türkiye de ECAC üyesi bir devlet olduğundan tavsiye kararına uyarak bu uygulamayı diğer devletlerdeki gibi aynı kurallar dahilinde uygulamaya başlamış. Neden 100 ml peki? Birleşmiş Milletler bünyesindeki Patlayıcılar Çalışma Grubunda yapılan araştırma, testler ve risk değerlendirmesi neticesinde sıvıların 100 ml lik kaplarda 1 litreklik poşette taşınması halinde (1 lt&#8217;lik poşete yaklaşık 6 adet kap sığmaktaymış) uçuşun emniyetini tehlikeye düşürmeyeceği sonucuna varılmış. Bilim adamları araştırmış bulmuş, bir şey diyemeyecem bu konuda. Peki bizde 100 ml olduğu nasıl anlaşılıyor? Baya, ya size soruyorlar ya da üzerindeki yazıları okuyorlar. Yani ben teroristlik yapmak istesem, alırım 200 ml sıvı patlayıcı, koyarım Dove krem kutusuna, yazıları bir güzel 100 diye düzenlerim, sorarlarsa da 100 der geçerim. Peki biz neden illa 100 diye uyguluyoruz? E çünkü diğer ülkelerde de öyle de ondan. 
Epostadaki şu satırlara bakalım:</p> +<p>&#8220;Ülkemiz yukarıda adı geçen uluslarası kuruluşların aldığı kararları  ve berlilediği standartları uygulamakla yükümlüdür.&#8221;</p> +<p>Bu konudaki uygulama diğer ülkelerde hangi standartlarda uygulanıyor bilmiyorum. Belki de sadece Kıbrıs uçuşlarında bir acayiplik vardır. Standart denilen kavram sadece 100 sayısına bağlı bir şeydir diye de anlaşılıyor olabilir.</p> +<p>Siz siz olun, uçağa binerken el bagajınızda üzerinde 100 ml üzeri bir şey yazan herhangi bir kap bulundurmayın. İçi boş dolu farketmez.</p> +<img src="http://feeds.feedburner.com/~r/oguzy-gezegen/~4/nubepmpaYEk" height="1" width="1" /> + + Oğuz Yarımtepe + http://www.loopbacking.info/blog + + + import me » Gezegen + There is no life here + + 2 + 2010-04-11T17:01:22+00:00 + + + + + Artık Sun yok! + + http://www.hakanuygun.com/blog/?p=432 + 2010-01-29T09:28:25+00:00 + <p>iP<a href="http://www.hakanuygun.com/blog/wp-content/uploads/2010/01/sunoracle.gif"><img class="alignleft size-full wp-image-434" title="sunoracle" src="http://www.hakanuygun.com/blog/wp-content/uploads/2010/01/sunoracle.gif" alt="sunoracle" width="202" height="122" /></a>ad haberleri arasında kaybolup gidiyor ama Oracle uzun süren Sun&#8217;ı satın alma işlemini bitirdi. Artık <a href="http://www.sun.com" target="_blank">www.sun.com</a> adresine girdiğinizde sizi doğrudan Oracle sitesine yönlendiriyor.</p> +<p>Beni en çok ilgilendiren konular ise Sun&#8217;ın özgür yazılım projelerine devam edilip edilmeyeceği konularında ise şimdilik olumlu haberler geliyor. Bütün bu projeler içerisinde devam edilmeyeceği açıklanan tek proje şimdilik Kenai.</p> +<p>Umarım hepimiz için mutlu son olur&#8230;</p> +<p><strong>Ek</strong>: <a href="http://www.kulturmantari.org/" target="_blank">Kültür Mantarı</a>&#8216;nın yönlendirmesi ile <a href="http://blogs.sun.com/jag/entry/so_long_old_friend" target="_blank">James Gosling&#8217;</a>in bu konu ile ilgili blogunu gördüm ve ordaki görselin de burada saklanmasının iyi olacağını düşünüp buraya kopyaladım&#8230;</p> +<p><a href="http://www.hakanuygun.com/blog/wp-content/uploads/2010/01/sunrip.jpg"><img class="aligncenter size-medium wp-image-445" title="sunrip" src="http://www.hakanuygun.com/blog/wp-content/uploads/2010/01/sunrip-300x234.jpg" alt="sunrip" width="300" height="234" /></a></p> + + Hakan + http://www.hakanuygun.com/blog + + + hakan.uygun.yazıyor.* » Gezegen + + 1 + 2010-04-11T17:01:26+00:00 + + + + + EMO 13. Ulusal Kongresi + + http://www.hakanuygun.com/blog/?p=381 + 2009-12-24T15:45:26+00:00 + <p>EMO&#8217;nun 23-26 Aralıkta ODTÜ de gerçekleşecek olan <a href="http://www.ulusalkongre.org" target="_blank">13. Ulusal Kongre</a>si kapsamında 25 Aralık Cuma günü 9:30-11:15 arasında Özgür Yazılım başlılklı özel oturumda &#8220;Özgür Yazılımların Uygulama Geliştirme Modeline Etkisi; Tekir’den Öğrendiklerimiz&#8221; ve 11.30-12.30 arasında da &#8220;Özgür Yazılımın Ekonomik ve Sosyal Yönleri” sunumlarını yapıyorum.</p> +<p>Genel olarak yüklü bir programı olan bu etkinlikte çeşitli <a href="http://haber.linux.org.tr/2009/12/23-26-aralik-emo-ulusal-kongre-ankara-linux-seminerleri/" target="_blank">LKD seminerleri</a> de olacak. Buyrunuz geliniz!</p> + + Hakan + http://www.hakanuygun.com/blog + + + hakan.uygun.yazıyor.* » Gezegen + + 1 + 2010-04-11T17:01:26+00:00 + + + + + Intel, Atom, Moblin + + http://www.hakanuygun.com/blog/?p=338 + 2009-09-24T09:00:51+00:00 + <p>Intel Atom işlemcileri ile hayatın her yerinde yer alamak istiyor. 
x86 tabanlı Atom işlemciler programcılara normal bilgisayarlar için yazılmış uygulamalarını çok fazla değişikliğe uğratmaya gerek kalmadan mobil cihazlarda çalıştırabilmesine olanak sağlıyor. Bu da Intel&#8217;e önemli avantajlar sağlıyor. Bu avantajını daha da arttırmak için cihazar üzerinde performansı arttıracak işletim sistemi için de kolları sıvayıp Moblin&#8217;i geliştirmeye başlamışlardı. Dün bu konular üzerine Intel&#8217;den üç önemli açıklama oldu&#8230;</p> +<p>Atom işlemcili cihazlarda uygulama performansını arttırmak için yeni bir geliştirici programı başlattılar. <a href="http://appdeveloper.intel.com/en-us/">Atom Developer Program</a>&#8216;ı teşvik etmek içinde bir yarışma başlattılar. Bence bir göz atmakta fayda var&#8230; ( Ben kayıt olacağım <img src="http://www.hakanuygun.com/blog/wp-includes/images/smilies/icon_smile.gif" alt=":)" class="wp-smiley" /> )</p> +<p>İkinci ve üçüncü açıklamalar ise bir arada geldi, Moblin&#8217;in yeni sürümü 2.1 yayınlandı ve Atom işlemcili bir <a href="http://www.engadget.com/2009/09/22/intel-announces-moblin-2-1-for-phones/#continued">akıllı telefon</a> üzerinde sunuldu. Intel bir çırpıda bir dolu firmaya rakip oldu <img src="http://www.hakanuygun.com/blog/wp-includes/images/smilies/icon_smile.gif" alt=":)" class="wp-smiley" /> Geçenlerde de <a href="http://www.hakanuygun.com/blog/?p=279">yazmıştım</a>,  önümüzdeki yıl içerisinde mobil dünyada bir dolu ilginç gelişmeler bekliyorum. Umarım bu rekabetten özgür yazılım ve biz kullanıcılar kazançlı çıkarız&#8230;</p> + + Hakan + http://www.hakanuygun.com/blog + + + hakan.uygun.yazıyor.* » Gezegen + + 1 + 2010-04-11T17:01:26+00:00 + + + + + Teknik Destek Kopya Kağıtı + + http://www.hakanuygun.com/blog/?p=330 + 2009-08-25T07:28:26+00:00 + <p>xkcd&#8217;de geçen gün yayınlanan <a href="http://xkcd.com/627/">bu</a> teknik destek kopya kağıdını pek beğendim ve Türkçe bir sürümünü yapayım dedim.</p> +<p><img class="aligncenter size-full wp-image-331" title="teknikdestek" src="http://www.hakanuygun.com/blog/wp-content/uploads/2009/08/teknikdestek.png" alt="teknikdestek" width="468" height="461" /><br /> +İsteyenler için ODF hali de <a href="http://www.hakanuygun.com/blog/wp-content/uploads/2009/08/teknikdestek.odg">burada</a></p> + + Hakan + http://www.hakanuygun.com/blog + + + hakan.uygun.yazıyor.* » Gezegen + + 1 + 2010-04-11T17:01:26+00:00 + + + + + Korsan Değil &#8220;Fikir Hırsızı&#8221; + + http://www.hakanuygun.com/blog/?p=312 + 2009-08-18T08:07:07+00:00 + <p>Kültür ve Turizm Bakanlığı, Fikir ve Sanat Eserleri Kanunu&#8217;nda değişiklik yapılarak İnternet üzerinden müzik, film, kitap ve benzeri şeyleri indirenlerinde ceza almasını sağlamak için çalışma <a href="http://www.ntv.com.tr/id/24992251/" target="_blank">başlatmış</a>. Bu denetimi yapabilmek için de internet servis sağlayıcıları ile birlikte çalışacaklarmış.</p> +<p>Her köşe başında kurulan tezgahlarda kitap, cd, dvd satan arkadaşlar hiç bir sorun ile karşılaşmadan bu işi yaparken, bunun için bildiğim kadarıyla yasal düzenlemeler zaten var, onlarla mücadele etmek yerine, internetten akan tarfiği denetleyecekler. Bu denetim sırasında da müzik mi yoksa sevilinizden gelen e-postanızı mı indirdiğiniz fark etmeyecek, dinleyecekler. Ayrıca indirdiğiniz müziğin yasal mı yoksa yasa dışımı olduğunu bir çırpıda anlayacaklar. 
Bu arada, haberden eğer yanlış okumadıysam,  yapılan operasyonda makinenizde çıkan parça sayısı kadar da görevlilere prim verilecek <img src="http://www.hakanuygun.com/blog/wp-includes/images/smilies/icon_smile.gif" alt=":)" class="wp-smiley" /> Yani büyük birader izlemeye ve istediğinde yasaklamaya olanak sağlayacak yeni yollar arıyor&#8230;</p> +<p>Bütün bunlardan fikir haklarına saygı duymadığım anlaşılmasın tam tersine korsana, fikir hırsızlığına kesinlikle karşıyım. Fakat bütün bunların bahane edilerek kişisel iletişimin ihlal edilmesine daha çok karşıyım.</p> +<p>Son olarak bir haber daha verelim Pirate Bay&#8217;in 23 GB&#8217;lik arşivi de <a href="http://thepiratebay.org/torrent/5053827" target="_blank">paylaşıma</a> açılmış. Bu arşiv içerisinde yasal olmayan şeyler olabilir ama yasal olarak paylaşılan da bir çok eser var. Sizler yasal olanlarını indirin <img src="http://www.hakanuygun.com/blog/wp-includes/images/smilies/icon_smile.gif" alt=":)" class="wp-smiley" /> Korsan değil özgür yazılım kullanın!</p> + + Hakan + http://www.hakanuygun.com/blog + + + hakan.uygun.yazıyor.* » Gezegen + + 1 + 2010-04-11T17:01:26+00:00 + + + + + Mobil Cihazlar Dünyasında Neler Oluyor? + + http://www.hakanuygun.com/blog/?p=279 + 2009-07-07T11:04:23+00:00 + <p><img class="aligncenter size-full wp-image-282" title="moblin" src="http://www.hakanuygun.com/blog/wp-content/uploads/2009/07/moblin.jpg" alt="moblin" width="280" height="151" />Bir süredir mobil cihazlar dünyası hareketlenmiş durumda. Apple iPhone ile birlikte mobil telefon dünyasında ciddi bir hareketlenme olmuştu. Palm, Nokia, Sony-Ericson, BlackBerry gibi sektörün önde gelen firmalarına Apple ciddi bir rakip olarak ortaya çıkmış ardından da Google Android ile bir platform olarak henüz yeterince destekleyen donanım olmasa bile vaadettikleri ile dikkatleri üzerine çekmişti. <a href="http://en.wikipedia.org/wiki/Android_os" target="_blank">Android</a>, <a href="http://en.wikipedia.org/wiki/WebOS" target="_blank">WebOS</a> ve <a href="http://en.wikipedia.org/wiki/IPhone_OS" target="_blank">iPhone OS</a>&#8216;a  karşı <a href="http://en.wikipedia.org/wiki/Symbian_OS" target="_blank">Symbian</a>&#8216;ı savunmaya devam eden Nokia, elinde olmayan hisselerini de alarak,  bir vakıf kurup Symbiyan&#8217;ı açık kaynak kodlu olarak  bu vakfa devretmişti.</p> +<p>Tam da bu esnada Intel Atom işlemcisi ile düşük kaynak kullanan PC&#8217;lerin geliştirilmesine olanak sağladı ve NetBook&#8217;lar geçtiğimiz yıl içinde popüler cihazlar arasına girdiler.</p> +<p>Bu yıl ise Intel, Mobile Internet Device ( MID - Mobil Internet Aracı ) üzerine ciddi yatırımlar yapmaya başladı. Hatta bu cihazların cazibesini arttırmak için özel bir linux dağıtımına bile başladı : <a href="http://en.wikipedia.org/wiki/Moblin" target="_blank">Moblin</a>.</p> +<p>Moblin&#8217;e destek konusunda Intel önce Canonical ile anlaşmıştı. Daha sonra Canonical NetBook dağıtımı olarak Nokia&#8217;nın kendi tabletlerinde kullanmak amacıyla ürettiği <a href="http://en.wikipedia.org/wiki/Maemo_%28operating_system%29" target="_blank">Maemo</a>&#8216;yu desteklemeye karar verdiğini açıkladı. Intel&#8217;de Moblin&#8217;i Linux Vakfı&#8217;na devrettiğini ve destek konusunda da Novell&#8217;le anlaştığını ilan etti. İki hafta önce detayları belirtilmeyen bir Nokia - Intel anlaşması ilan edildi. 
Genel olarak yorumlanan ise  Nokia&#8217;nın daha becerikli telefonlar üretmek için Intel teknolojilerini kullanacağı bu arada da Moblin ile Maemo arasında bir seçim yapıp güçlerini birleştirecekleri yönündeydi. Bugün Nokia, Android temelli telefonlar üretmeyeceğini ve GTK+ temelli olan Maemo&#8217;yu Qt&#8217;ye taşıyacağını ilan etti.</p> +<p>İşte benim sorularımın temelini de burası oluşturuyor. Qt temelli bir Maemo&#8217;yu Canonical desteklemeye devam edecek mi? Nokia Intel işlemcili MID&#8217;ler üretip bunlarda Mameo&#8217;mu koşturacak yoksa Intel işlemcili telefonlar üretip Symbian&#8217;ı rakipleri kadar becerikli yepyeni bir hale mi dönüştürecek? Intel MID&#8217;ler konusunda neler planlıyor? Bu planları içerisinde Moblin&#8217;i desteklemeye devam etmek var mı yoksa Nokia ile birlikte Maemo&#8217;ya yatırım mı yapacaklar? NetBook&#8217;larda da kullanılmaya başlayan Android de bu üretilecek donanımlar için bir alternatif olacaklar mı?</p> +<p>Hepsinden önemlisi bütün bunların sonucunda ortaya çıkacak olan, biz tüketiciler için ucuz ve becerikli donanımlar mı yoksa; bir biri ile uyumsuz bir dolu daha oyuncak mı?</p> + + Hakan + http://www.hakanuygun.com/blog + + + hakan.uygun.yazıyor.* » Gezegen + + 1 + 2010-04-11T17:01:26+00:00 + + + + + LKD Genel Kurulu için Ankara&#8217;ya + + http://www.hakanuygun.com/blog/?p=259 + 2009-06-17T21:33:17+00:00 + <p>Bu hafta sonu gerçekleştirilecek LKD Genel Kurulu için Ankara&#8217;ya gidiyoruz. Aşağıdaki yazı bu yolculuk için organizasyon yapmaya çalışan  Volkan&#8217;dan&#8230;</p> +<p>***</p> +<p>Ankara yerine Bağdata gitsem bu kadar koştururdum herhalde,</p> +<p>TCDD : en teknolojik YHT çalıştıran, 5 saaat 28 dk Ankaraya ulaştıran koskoca<br /> +kurum.<br /> +Evet bu kurum malesef bilet satmak istemiyor.</p> +<p>1- web sitesi windows ve Internet explorer bağımlısı. Öncelikle böyle bir<br /> +sisteme sahip olmanız gerekli. (MAC ve Linux kullanıcıları tren yolcuları<br /> +portföyünde yer almıyor. Onlar uçak veya otobüs severler.!)</p> +<p>2- web sitesindeki bilet <span id="OBJ_PREFIX_DWT255" class="Object"><span id="OBJ_PREFIX_DWT256" class="Object">sat</span></span>ış uygulamasında banka sıra makinelerinin bir<br /> +türevi sadece. Sıradan boş koltuk veriyor. Pulman vagonlarında ilk 6 koltuk<br /> +karşılıklı bakar durumda, son 3 koltukda geriye yatamaz durumda. Bilin<br /> +bakalım verdiği ilk koltuklar nereleri ? Evet bildiniz bunlar. Farklı bir<br /> +koltuk veya vagon seçemiyorsunuz. Seçilebilecek şeyler bayan yanı ve<br /> +internet. Onlarında ne kadar gerçek seçimlere izin verildiği şüpheli.<br /> +(İnternet olsun dedim, sonuç yok dedi.)</p> +<p>3- PTT şubeleri tren bilet satacak acenteler olarak duyuruluyor. Gidiyorsunuz,<br /> +veee&#8230; Evet, biz <span id="OBJ_PREFIX_DWT257" class="Object"><span id="OBJ_PREFIX_DWT258" class="Object">sat</span></span>ıyoruz, ama siteye girebilirsek diyorlar. Ne oldu, tabii<br /> +ki giremediler. 10dk sıra beklediniğiniz için teşekkür ederiz.</p> +<p>4- Acente, komisyon karşılığı TCDD bileti satanlar. Gidiyoruz birine, bize<br /> +bilet lazım satabiliyor musunuz? diye soruyorum. Tabii buyrun diyorlar. Gidiş<br /> +dönüş 1 tam 1 öğrenci istiyorum. <span id="OBJ_PREFIX_DWT259" class="Object"><span id="OBJ_PREFIX_DWT260" class="Object">Sat</span></span>ıcı önce<br /> +- G/D kesmiyorum diyor buradan.!<br /> +- Nasıl yani?<br /> +- Fark yok zaten,ayrı ayrı keseyim. 
Fiyatı farklı mı ki?<br /> +Başka bir arkadaşı düzeltiyor, aynı express olursa kesebilirsin.<br /> +- Elbette G/D niye alayım indirim var diyorum.<br /> +Neyse girip deniyor, gelen koltuk numaralarını soruyorum.<br /> +- 4,5 diyor. (İlk altı koltuk içinden boş olanlar)<br /> +- Değiştiremiyor musunuz?<br /> +- Malesef.<br /> +- Internet sürümüne mi giriyorsunuz diyorum ister istemez.<br /> +- Hayır biz acente olarak giriyoruz ama fark yok. cevabı geliyor. (Tahmininen<br /> +üzerine ek komisyon ekleniyor sadece.)<br /> +- Kim koltuk seçtiriyor bana ?<br /> +- Gardan alabilirsiniz, Haydarpaşa veya Sirkeci.</p> +<p>5- Rotamız Sirkeci garı. Bir otobüs ve tramvay ile ulaşıyorum.<br /> +Bende dil yandı ya, ilk soru Fatih expresine bilet istiyorum, ama koltuk<br /> +seçebiliyor musunuz?<br /> +- Bakalım yer boş olursa seçebiliriz diyor <span id="OBJ_PREFIX_DWT261" class="Object"><span id="OBJ_PREFIX_DWT262" class="Object">sat</span></span>ıcı bu kez.<br /> +- Ohh nihayet.<br /> +- 1 tam 1 öğrenci G/D istiyorum, artı 1 öğrenci sadece gidiş.<br /> +- Öğrencide G/D farkı yok cevabı geliyor.<br /> +- Biliyorum, tam da var onun için söylüyorum.(Bilgi: Tam bileti G/D alırsanız<br /> +öğrenci bileti ile aynı fiyat, garip.G/D alacaksanız öğrenciliğiniz işe<br /> +yaramıyor. Yani pasoya gerek yok. Tespit: Öğrenciler hep tek yö seyahat<br /> +eder.)<br /> +- Kredi kartımı, peşin mi?<br /> +- DIINN ! kredi kartı.. var dimi?<br /> +- Evet, 112 TL<br /> +- Buyrun, zııttt pıırtt iki tak tak bi laklak biletler ve pos slipi elimde.</p> +<p>Gişenin önünden ayrılmadan biletleri tren, tarih, yer vs. doğru mu diye<br /> +kontrol ediyorum. Elimde biletler teşekkür edip ayrılırken, 1,5 saatte ancak<br /> +bir alış veriş yapmış oluyorum.  Daha bir de geri dönüş yolu var.</p> +<p>Velhasıl,<br /> +Gidiş : 18/06/2009 Perşembe 23:30 Haydarpaşa Vagon:X Koltuk: XX-XX-XX<br /> +Gidiş : 20/06/2009 Cumartesi 23:30 Ankara Vagon:X Koltuk: XX-XX</p> +<p>Hayırlı yolculuklar.</p> +<p>=====================<br /> +Dipnot-1: Bu yerleri aldığım 1. vagon haricinde 2 vagon tamamen boş görünüyor<br /> +daha. 2-3 nolarda <span id="OBJ_PREFIX_DWT263" class="Object"><span id="OBJ_PREFIX_DWT264" class="Object">sat</span></span>ılan yerler var.</p> +<p>Dipnot-2: Ben telefonla iş yapmaya alışamamış biri olarak, rezervasyon veya<br /> +<span id="OBJ_PREFIX_DWT265" class="Object">sat</span>ış işlemi var mı diye hiç peşine düşmedim. Orada da farklı bir macera sizi<br /> +bekliyor olabilir, kimbilir?</p> +<p>Dipnot-3: Yataklı vagonlarda alt-üst yatak seçme şansınız olabilir mi sizce?</p> + + Hakan + http://www.hakanuygun.com/blog + + + hakan.uygun.yazıyor.* » Gezegen + + 1 + 2010-04-11T17:01:26+00:00 + + + + + IE, WTW ve Gıda Yardımı + + http://www.hakanuygun.com/blog/?p=248 + 2009-06-16T11:38:02+00:00 + <p><a href="http://walktheweb.wfp.org/" target="_blank"><img class="aligncenter size-full wp-image-252" title="wfp-wtw" src="http://www.hakanuygun.com/blog/wp-content/uploads/2009/06/wfp-wtw.png" alt="wfp-wtw" width="512" height="240" /></a>Bugünlerde dünya üzerindeki açlık, gıda yardımları ve bunlara ait haberler her zamankinden daha fazla ilgimi çekiyor. Dolayısıyla Microsoft&#8217;un yeni kampanyası ilgimi çekti. Microsoft İnternet Tarayıcısının yeni sürümünü daha iyi duyurabilmek için gıda yardımı üzerine kurulu bir kampanya başlatmış. IE8&#8242;in her tam indirilmesine karşılık 8 öğün yemek bağışında bulunacakmış. 
Detaylara <a href="http://www.browserforthebetter.com/download.html" target="_blank">buradan</a> ulaşabilirsiniz&#8230;</p> +<p>Bu konu ile ilgili de bir dolu tartışma gündeme geldi tabii ki, örneğin <a href="http://www.techcrunch.com/" target="_blank">TechCrunch</a>&#8216;da kampanyaya dair bir dolu yazı ve tartışma var. Ben kendi adıma Linux üzerinde zaten çalışmayan bu tarayıcıyı indirip biraz ağ zamanı harcayıp bağışta bulunsam mı, zaten IE kullananların hatalı çalışan eski sürümler yerine CSS ve JS ile ilgili bir dolu hatanın düzeltildiği bu yeni sürüme geçmelerini teşvik etsem mi, yoksa hiç sesimi çıkarmasam mı bilemedim. Ardından da bu haberi bahane edip daha fazlası ile yazayım dedim.</p> +<p>İster IE8 indirin isterseniz aşağıdaki organizasyonların sitelerini ziyaret edip dünya üzerindeki açlık ve fakirlikle mücadeleye katkıda bulunmak için yapabileceklerinizi öğrenin&#8230; Bunların içerisinde özellikle Birleşmiş Milletler Dünya Gıda Programı&#8217;nın <a href="http://walktheweb.wfp.org/" target="_blank">Walk The Web</a> kampanyasına bir göz atmanızı öneririm&#8230;</p> +<ul> +<li><a href="http://www.wfp.org/" target="_blank">www.wfp.org</a></li> +<li><a href="http://www.actionagainsthunger.org/" target="_blank">www.actionagainsthunger.org</a></li> +<li><a href="http://www.hakanuygun.com/blog/www.makepovertyhistory.org" target="_blank">www.makepovertyhistory.org</a></li> +<li><a href="http://www.standagainstpoverty.org" target="_blank">www.standagainstpoverty.org</a></li> +<li><a href="http://www.engineersagainstpoverty.org" target="_blank">www.engineersagainstpoverty.org</a></li> +<li><a href="http://www.whiteband.org" target="_blank">www.whiteband.org</a></li> +</ul> +<p>Son olarak da bugünlerde herkese önerdiğim gibi <a href="http://www.facebook.com/ext/share.php?sid=107634228486&h=FwnnE&u=6crnv&ref=mf" target="_blank">Yuva</a> ( Home ) belgeselini izlemenizi şiddetle tavsiye ederim.</p> + + Hakan + http://www.hakanuygun.com/blog + + + hakan.uygun.yazıyor.* » Gezegen + + 1 + 2010-04-11T17:01:26+00:00 + + + + + TBD Bilişim Kongresi&#8217;nde Özgür Yazılım Paneli + + http://www.hakanuygun.com/blog/?p=244 + 2009-05-28T16:22:08+00:00 + <p>TBD&#8217;nin bu yıl 3.sünü düzenlediği <a href="http://www.istanbulbilisimkongresi.org.tr/" target="_blank">İstanbul Bilişim Kongresi</a>&#8216;nde Pazar günü saat 14:00&#8242;de Özgür Yazılım Paneli olacaktır. Panel&#8217;de özgür yazılım ve iş modelleri üzerinde durulacaktır. İlgilenenlere duyurulur&#8230;</p> +<p><strong> Yer:</strong> Marmara Üniversitesi Nişantaşı Kampüsü<br /> +Erdal İnönü Bilim ve Kültür Merkezi<br /> +<strong>Tarih:</strong> 31 Mayıs Pazar, 14:00 - 15:20<br /> +<strong>Oturum başkanı:</strong> Görkem Çetin<br /> +<strong>Konuşmacılar:</strong> Enver Altın, Hakan Uygun, Cahit Cengizhan</p> + + Hakan + http://www.hakanuygun.com/blog + + + hakan.uygun.yazıyor.* » Gezegen + + 1 + 2010-04-11T17:01:26+00:00 + + + + + Sıralama Algoritmaları + + http://www.hakanuygun.com/blog/?p=231 + 2009-04-13T08:20:53+00:00 + <p>Sıralama algoritmaları, programcılığa girişi oluşturan en temel şeylerdendir. Özellikle aynı problemin çözümü için farklı yöntemlerin nasıl farklı sonuçlar verdiğini görmek için şahane örneklerdir. Daha da iyisi bu farklı algoritmaların görsel olarak karşılaştırmasıdır. İşte tam da bu işi başarıyla yapan bu <a href="http://www.sorting-algorithms.com/" target="_blank">siteye</a> bakmanızı şiddetle tavsiye ederim. 
Sadece farklı algoritmaları görsel karşılaştırmasını değil, her algoritmanın farklı veri kümelerinde davranış biçimini ve detaylı karşılaştırmalarını da bulabilirsiniz&#8230;</p> + + Hakan + http://www.hakanuygun.com/blog + + + hakan.uygun.yazıyor.* » Gezegen + + 1 + 2010-04-11T17:01:26+00:00 + + + + diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main/bloggers.css b/DJAGEN/tags/djagen_old/djagen/templates/main/bloggers.css new file mode 100755 index 0000000..30bc15b --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/templates/main/bloggers.css @@ -0,0 +1,55 @@ +#bloggers { + /* position: absolute; */ + top: 115px; + right: 15px; + width: 230px; +} + +#bloggers h2 { + margin-left: 0; + font-size: 12px; +} +#bloggers ul { + padding:0; + margin: 0 0 1.5em 0; + list-style-type:none; +} + +#bloggers ul li { + padding: 1px; +} + +#bloggers ul li div img { + +} + +#bloggers ul li div { + display: none; +} + +#bloggers ul li:hover > a { + font-weight: bold; +} +#bloggers ul li div img.head { + float: right; + padding: 0px; +} + +#bloggers ul li:hover > div { + display: inline; +} + +#bloggers ul li:hover { + padding: 0 0 10px 0; + background-color: #cfcfcf; +} + +#bloggers .ircnick { + display: block; + color: #000000; + font-style: italic; + padding: 2px; +} +#bloggers a:visited { + color: #5a7ac7 !important; +} diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main/favicon.ico b/DJAGEN/tags/djagen_old/djagen/templates/main/favicon.ico new file mode 100755 index 0000000..96bc6d9 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main/favicon.ico differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main/feeds.html b/DJAGEN/tags/djagen_old/djagen/templates/main/feeds.html new file mode 100755 index 0000000..f2bd421 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/templates/main/feeds.html @@ -0,0 +1,26 @@ +
    + +
    diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main/foafroll.xml b/DJAGEN/tags/djagen_old/djagen/templates/main/foafroll.xml new file mode 100755 index 0000000..13e9f83 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/templates/main/foafroll.xml @@ -0,0 +1,42 @@ + + + + Linux Gezegeni + http://gezegen.linux.org.tr + + + + + Hakan Uygun + + + hakan.uygun.yazıyor.* » Gezegen + + + + + + + + + + Oğuz Yarımtepe + + + import me » Gezegen + + + + + + + + + + diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main/foafroll.xml.old b/DJAGEN/tags/djagen_old/djagen/templates/main/foafroll.xml.old new file mode 100755 index 0000000..ccd0122 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/templates/main/foafroll.xml.old @@ -0,0 +1,1043 @@ + + + + Linux Gezegeni + http://gezegen.linux.org.tr + + + + + A. Murat Eren + + + There is no silver bullet.. + + + + + + + + + + Adem Alp Yıldız + + + Adem Alp YILDIZ + + + + + + + + + + Ahmet Aygün + + + ~/blog + + + + + + + + + + Ahmet Yıldız + + + Bugün Linux + + + + + + + + + + Ali Erdinç Köroğlu + + + The Point of no return » LKD + + + + + + + + + + Ali Erkan İMREK + + + armut + + + + + + + + + + Alper Kanat + + + raptiye + + + + + + + + + + Alper Orus + + + + + + + + + + + + + Alper Oğuz + + + Alper'in günlüğü » Gezegen + + + + + + + + + + Alper Somuncu + + + alper somuncu nokta com - IBM AIX + + + + + + + + + + Anıl Özbek + + + + + + + + + + + + + Arman Aksoy + + + Expressed Exons » Gezegen + + + + + + + + + + Bahri Meriç Canlı + + + Bahri Meriç CANLI Kişisel Web Sitesi » Linux + + + + + + + + + + Barış Metin + + + + + + + + + + + + + Barış Özyurt + + + TuxWorkshop + + + + + + + + + + Bora Güngören + + + Bora Güngören + + + + + + + + + + Can Burak Çilingir + + + Can Burak Çilingir » gezegen-linux + + + + + + + + + + Can Kavaklıoğlu + + + Güncel günce + + + + + + + + + + Deniz Koçak + + + King of Kebab » lkd + + + + + + + + + + Devrim Gündüz + + + + + + + + + + + + + Doruk Fişek + + + Sit Alanı'nın Seyir Defteri » Gezegen + + + + + + + + + + Ekin Meroğlu + + + Sütlü Kahve + + + + + + + + + + Enver Altın + + + The truth about my life + + + + + + + + + + Erhan Ekici + + + bir delinin hatıra defteri » linux + + + + + + + + + + Erçin Eker + + + The Useless Journal v4 + + + + + + + + + + FTP ekibi + + + LKD FTP Ekibi + + + + + + + + + + Faik Uygur + + + Bir Takım Şeyler + + + + + + + + + + Fatih Arslan + + + Arslanlar Şehri » Gezegen + + + + + + + + + + Furkan Çalışkan + + + + + + + + + + + + + Gökdeniz Karadağ + + + hayalci'nin maceraları + + + + + + + + + + Gökmen Göksel + + + rat on red » gezegen.linux + + + + + + + + + + Gökmen Görgen + + + [~]> # Gökmen Görgen ;-) » oi + + + + + + + + + + Gürcan Öztürk + + + gurcanozturk.com + + + + + + + + + + Gürer Özen + + + Indiana Jones' Diary + + + + + + + + + + Hakan Uygun + + + hakan.uygun.yazıyor.* » Gezegen + + + + + + + + + + Hüseyin Uslu + + + + + + + + + + + + + K. Deniz Öğüt + + + Mare Nostrum + + + + + + + + + + Kaya Oğuz + + + + + + + + + + + + + Kerem Can Karakaş + + + Blog + + + + + + + + + + Koray Bostancı + + + olmayana ergi.. 
+ + + + + + + + + + Kubilay Onur Güngör + + + Kırmızı ve Siyah » Gezegen + + + + + + + + + + LKD Gezegen Duyuruları + + + LKD Gezegen Çalışma Grubu + + + + + + + + + + LKD Seminer Duyuruları + + + Seminer çalışma grubu + + + + + + + + + + LKD YK + + + Linux Kullanıcıları Derneği Yönetim Kurulu » Günlük + + + + + + + + + + LKD.org.tr + + + Haberler + + + + + + + + + + Levent Yalçın + + + + + + + + + + + + + M.Murat Akbaş + + + Mehmet Murat AKBAS + + + + + + + + + + M.Tuğrul Yılmazer + + + + + + + + + + + + + Mehmet Büyüközer + + + + + + + + + + + + + Mehmet Salih Yıldırım + + + + + + + + + + + + + Murat Hazer + + + Murat HAZER + + + + + + + + + + Murat Koç + + + Koçhane Kayıtları + + + + + + + + + + Murat Sağlam + + + panhaema.com + + + + + + + + + + Murat TİKİL + + + murattikil + + + + + + + + + + Mustafa Karakaplan + + + MuKa PlaNeT + + + + + + + + + + Necati Demir + + + :(){ :|:& };: + + + + + + + + + + Necdet Yücel + + + nyucel's diary + + + + + + + + + + Nesimi Acarca + + + nesimia.com + + + + + + + + + + Nihad Karslı + + + Enki'den » Linux + + + + + + + + + + Onur Tolga Şehitoğlu + + + Onur'sal » Bilgisayar + + + + + + + + + + Onur Yalazı + + + www.yalazi.org + + + + + + + + + + Oğuz Yarımtepe + + + import me » Gezegen + + + + + + + + + + Penguen-CG + + + + + + + + + + + + + Python-TR + + + Python - Java + + + + + + + + + + Recai Oktaş + + + konu/teknik + + + + + + + + + + Serbülent Ünsal + + + Serbülent Ünsal'ın Web Günlüğü + + + + + + + + + + Serkan Altuntaş + + + serkan » Linux Gezegeni + + + + + + + + + + Serkan Kaba + + + Serkan Kaba + + + + + + + + + + Serkan Kenar + + + Kayıp Şehir / Serkan Kenar » debian + + + + + + + + + + Server Acim + + + Pardus, Müzik, Yaşam... + + + + + + + + + + Sinan Alyürük + + + Ayder Zamanı + + + + + + + + + + Stand + + + + + + + + + + + + + Talat Uyarer + + + Huzur Mekanı + + + + + + + + + + Tayfur Taybuğa + + + Tayfur Taybuğa + + + + + + + + + + Umur Erdinç + + + Umur'un Güncesi + + + + + + + + + + Web-CG + + + Web Çalışma Grubu + + + + + + + + + + Ömer Fadıl Usta + + + Bi'Log + + + + + + + + + + Özgürlükiçin.com + + + Özgürlük için... - Haberler + + + + + + + + + + Ümran Kamar + + + Morning Glory + + + + + + + + + + diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main/generic.css b/DJAGEN/tags/djagen_old/djagen/templates/main/generic.css new file mode 100755 index 0000000..6614810 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/templates/main/generic.css @@ -0,0 +1,52 @@ +/* Basic tags */ +a img { + border: 0px; +} + +pre { + overflow: auto; +} + +/* Anchors */ +a { + color: #333638; +} + +a:visited { + color: #757B7F; +} + +a:active { + color: #ff0000; +} + +/* Basic classes */ + +.none { /* to add paragraph spacing to various elements for ttys */ + margin: 0px; + padding: 0px; +} + +.invisible { /* stuff that should appear when this css isn't used */ + margin: 0px; + border: 0px; + padding: 0px; + height: 0px; + visibility: hidden; +} + +.left { + margin: 10px; + padding: 0px; + float: left; +} + +.right { + margin: 10px; + padding: 0px; + float: right; +} + +.center { + text-align: center; +} diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main/index.html b/DJAGEN/tags/djagen_old/djagen/templates/main/index.html new file mode 100755 index 0000000..35a41a3 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/templates/main/index.html @@ -0,0 +1,915 @@ + + + + Linux Gezegeni + + + + + + + + + + + + + + + + + + + + + +
    + + +
    + + +
    +

16 March 2010

    + +
    + + +
    +
    +
    +

+ +Seminar Winds in the Near East +

    +
    +
    +
    +

Since last week we have been organizing Linux and Free Software seminars at Yakın Doğu Üniversitesi (Near East University). Our first seminar was "What is Linux?". It was one of the most entertaining "What is Linux?" talks I have listened to in a long time. Even though it ran for three and a half hours, speaker Ali Erdinç Köroğlu's entertaining delivery made the time flow like water.

    +

When I first came to Near East, I too gave a "What is Linux?" seminar. The turnout back then looked like the picture below.

    +

    +

The audience, which should mostly have consisted of teachers, was in fact made up of students. In a talk of about an hour and a half I explained to the participants what Linux is. Not a single question came during the seminar, so when people came up to me with questions afterwards, I was quite happy.

    +

    +

As things stand now, besides the number of participants being nothing to dismiss, I think a more engaged crowd is attending.

    +

    +

It should also be said that Ali Erdinç has quite an entertaining style, and that he pokes the audience now and then to keep their attention.

    +

    +

This seminar series will continue until May. For those interested, it can be followed via the university's announcements page, Facebook and Twitter. For those in Cyprus, let us also mention that we hold film screenings at the university every Friday evening and on Saturdays. We have fun watching; do join us.

    +

One could say spring has arrived in Lefkoşa. Oğuz Yarımtepe, reporting from Cyprus.

    +

    +
    +
    +
    + + + + + + + + + +
    +
    + +
    +
    +
    +

05 February 2010

    + +
    + + +
    +
    +
    +

    + +100 ml +

    +
    +
    +
    +

As of 1 January 2010 a liquid restriction applies on flights. Liquids, gels, creams and the like over 100 ml are confiscated from carry-on luggage. It became something I researched after an incident happened to me while returning to Cyprus at New Year's. I had a 150 ml Dove cream in my backpack. I had been using the cream for quite a while. At around 5:30 we were waiting at the final checkpoint to board the plane. When security said we could come through for the check, I dropped the bag onto the machine and walked through. The officer at the machine called out that there was some liquid in this bag. In the front pocket, he said. Yes, there is cream, I said. He opened it. He looked: a 150 ml cream. We cannot let more than 100 ml through, he said. Half asleep, I said it was a used container, what is inside is probably under 100 ml. The officer smiled. He too was doing his job. The woman standing there sternly took the container, looked at it, and said what matters to us is its volume. Frankly, there was no point in arguing. They were just applying a rule. As instructed, I threw the cream I had paid for into the bin with my own hands.

    +

Now let us come to the parts of the incident that strike me as strange:

    +

* The security officer decided how many ml the container held by looking at the label. So next time, just to test the system, if I buy the same cream and change the 150 on it to 100, it will probably get through.

    +

* The officer did not even open it to look inside. He asked me what was in it. So even if I had put in some of that swine flu that has been out of sight for a while, he would have had no way of knowing.

    +

* I threw it into the bin with my own hands; that stung the most.

    +

So I sent an e-mail to the Ministry of Transport. My e-mail was forwarded to the Directorate General of Civil Aviation and a reply came. In short, this 100 ml practice entered the civil aviation agenda after the terrorist plot uncovered in England on 10 August 2006. With regulation 1546/2006, issued on 6 November 2006, it began to be applied in all EU member states, plus Switzerland, Iceland and Norway, the USA and Canada. Since Turkey is an ECAC member state, it followed the recommendation and started applying the practice under the same rules as the other states. Why 100 ml, then? Research, tests and risk assessments carried out by the Explosives Working Group within the United Nations concluded that carrying liquids in 100 ml containers inside a 1-litre bag (about 6 such containers fit in a 1 l bag) does not endanger flight safety. The scientists researched it and that is what they found; I cannot argue with that. So how do they establish here that it is 100 ml? Simple: they either ask you or read what is written on the container. So if I wanted to be a terrorist, I would take 200 ml of liquid explosive, put it in a Dove cream container, neatly edit the label to read 100, and if asked just say 100 and walk through. And why do we insist on exactly 100? Well, because that is how it is done in other countries. Look at these lines from the e-mail:

    +

"Our country is obliged to implement the decisions taken and the standards set by the international organizations named above."

    +

I do not know to what standard this practice is applied in other countries. Perhaps there is an oddity only on Cyprus flights. Or perhaps the concept called a "standard" is understood as something tied only to the number 100.

    +

Whatever you do, when boarding a plane do not keep any container in your carry-on whose label says anything over 100 ml. Full or empty makes no difference.

    +
    +
    +
    + + + + + + + + + +
    +
    + +
    +
    +
    +

29 January 2010

    + +
    + + +
    +
    +
    +

+ +Sun is no more! +

    +
    +
    +
    +

It is getting lost among the iPad news, but Oracle has completed its long-running acquisition of Sun. When you go to www.sun.com now, it redirects you straight to the Oracle site.

    +

What interests me most is whether Sun's free software projects will be continued, and on that front the news is positive for now. Among all these projects, the only one announced to be discontinued so far is Kenai.

    +

I hope it turns out to be a happy ending for all of us…

    +

Addendum: Via Kültür Mantarı's pointer I saw James Gosling's blog post on this subject, and thinking it would be good to preserve the image there, I copied it here…

    +

[image: sunrip]


    +
    +
    + + + + + + +
    +
    + +
    +
    +
    +

24 December 2009

    + +
    + + +
    +
    +
    +

+ +EMO 13th National Congress +

    +
    +
    +
    +

As part of EMO's 13th National Congress, taking place at ODTÜ on 23-26 December, on Friday 25 December I am giving the talk "The Effect of Free Software on the Application Development Model: What We Learned from Tekir" in the special session titled Free Software between 9:30-11:15, and "The Economic and Social Aspects of Free Software" between 11:30-12:30.

    +

This event has a packed programme overall, and there will also be various LKD seminars. Do come along!


    +
    +
    + + + + + + +
    +
    + +
    +
    +
    +

24 September 2009

    + +
    + + +
    +
    +
    +

    + +Intel, Atom, Moblin +

    +
    +
    +
    +

With its Atom processors, Intel wants a place in every corner of life. x86-based Atom processors let programmers run applications written for ordinary computers on mobile devices without many changes, which gives Intel significant advantages. To push that advantage further, they had also rolled up their sleeves on an operating system that would improve performance on these devices and started developing Moblin. Yesterday there were three important announcements from Intel on these subjects…

    +

They launched a new developer programme to improve application performance on Atom-based devices, and a competition to promote the Atom Developer Program. I think it is worth a look… ( I am going to sign up :) )

    +

The second and third announcements came together: Moblin's new version 2.1 was released and demonstrated on an Atom-based smartphone. In one stroke Intel became a competitor to a whole pile of companies :) As I wrote recently, I expect plenty of interesting developments in the mobile world over the coming year. I hope free software and we users come out of this competition as the winners…


    +
    +
    + + + + + + +
    +
    + +
    +
    +
    +

25 August 2009

    + +
    + + +
    +
    +
    +

+ +Tech Support Cheat Sheet +

    +
    +
    +
    +

I really liked this tech support cheat sheet published on xkcd the other day, and thought I would make a Turkish version.

    +

[image: teknikdestek]
+For those who want it, the ODF version is here too


    +
    +
    + + + + + + +
    +
    + +
    +
    +
    +

18 August 2009

    + +
    + + +
    +
    +
    +

+ +Not a "Pirate" but an "Idea Thief" +

    +
    +
    +
    +

The Ministry of Culture and Tourism has reportedly started work on amending the Law on Intellectual and Artistic Works so that those who download music, films, books and the like over the Internet are also penalized. To carry out this monitoring, they will reportedly work together with internet service providers.

    +

While the folks selling books, CDs and DVDs at stalls on every street corner carry on without running into any trouble, and legal regulations for that already exist as far as I know, instead of fighting them they are going to monitor the traffic flowing over the internet. During this monitoring it will make no difference whether you are downloading music or an e-mail from your loved one; they will be listening. They will also tell in an instant whether the music you downloaded is legal or illegal. By the way, unless I misread the news, officials will even get a bonus per track found on your machine during a raid :) In other words, Big Brother is looking for new ways to watch and, when it wants to, to ban…

    +

Do not take any of this to mean I do not respect intellectual property rights; on the contrary, I am absolutely against piracy and idea theft. But I am even more against all of it being used as an excuse to violate personal communication.

    +

Finally, one more piece of news: The Pirate Bay's 23 GB archive has also been opened for sharing. There may be illegal things in this archive, but there are also many works shared legally. You go and download the legal ones :) Use free software, not pirated software!


    +
    +
    + + + + + + +
    +
    + +
    +
    +
    +

07 July 2009

    + +
    + + +
    +
    +
    +

+ +What Is Happening in the World of Mobile Devices? +

    +
    +
    +
    +

The world of mobile devices has been stirring for a while now. With the Apple iPhone there was serious movement in the mobile phone world: Apple emerged as a serious rival to the sector's leading companies such as Palm, Nokia, Sony Ericsson and BlackBerry, and then Google drew attention with Android, a platform that attracted notice with its promises even though there was not yet enough hardware supporting it. Nokia, continuing to defend Symbian against Android, WebOS and iPhone OS, bought up the shares it did not already own, founded a foundation, and handed Symbian over to it as open source.

    +

Right at this point, Intel's Atom processor made it possible to develop PCs with low resource usage, and netbooks became popular devices over the past year.

    +

This year Intel began investing seriously in the Mobile Internet Device (MID). It even started a dedicated Linux distribution to increase these devices' appeal: Moblin.

    +

On supporting Moblin, Intel had first made a deal with Canonical. Later Canonical announced it had decided to support Maemo, which Nokia produces for its own tablets, as its netbook distribution. Intel in turn announced that it had handed Moblin over to the Linux Foundation and agreed with Novell on support. Two weeks ago a Nokia - Intel agreement was announced without details. The general interpretation was that Nokia would use Intel technologies to build more capable phones, and that meanwhile the two would choose between Moblin and Maemo and join forces. Today Nokia announced that it will not produce Android-based phones and that it will port the GTK+-based Maemo to Qt.

    +

This is exactly where my questions start. Will Canonical continue to support a Qt-based Maemo? Will Nokia build Intel-based MIDs and run Maemo on them, or will it build Intel-based phones and turn Symbian into something brand new and as capable as its rivals? What is Intel planning for MIDs? Do those plans include continuing to support Moblin, or will they invest in Maemo together with Nokia? And will Android, which has also started to appear on netbooks, be an alternative for the hardware to come?

    +

Most important of all: will what comes out of all this be cheap and capable hardware for us consumers, or yet another pile of mutually incompatible toys?


    +
    +
    + + + + + + +
    +
    + +
    +
    +
    +

17 June 2009

    + +
    + + +
    +
    +
    +

+ +To Ankara for the LKD General Assembly +

    +
    +
    +
    +

We are going to Ankara for the LKD General Assembly taking place this weekend. The piece below is from Volkan, who tried to organize this trip…

    +

    ***

    +

I would probably run around this much if I were going to Baghdad instead of Ankara.

    +

TCDD: the huge institution that runs the most high-tech high-speed trains (YHT) and gets you
+to Ankara in 5 hours 28 minutes.
+Yes, unfortunately this institution does not want to sell tickets.

    +

1- The website is addicted to Windows and Internet Explorer. First of all you need
+to own such a system. (Mac and Linux users are not part of the rail passenger
+portfolio. They must love planes or buses!)

    +

2- The ticket sales application on the website is just a variant of the queue
+machines at banks. It hands out the next empty seat in order. In the Pullman cars the
+first 6 seats face each other and the last 3 do not recline. Guess which
+seats it assigns first? Yes, you guessed it: those. You cannot pick a different
+seat or car. The only selectable options are "next to a lady" and
+"internet". How much real choice even those allow is doubtful.
+(I asked for internet; it said no results.)

    +

3- PTT branches are announced as agencies that will sell train tickets. You go,
+aaand… Yes, we sell them, they say, if we can get onto the site. What happened? Of course
+they could not get on. Thank you for waiting 10 minutes in the queue.

    +

4- Agencies: those who sell TCDD tickets for a commission. We go to one; I ask,
+we need tickets, can you sell them? Of course, go ahead, they say. I want a round
+trip, 1 full fare and 1 student. The clerk starts with
+- I can't issue round-trip from here!
+- What do you mean?
+- There's no difference anyway, let me issue them separately. Is the price even different?
+A colleague corrects him: you can issue it if it's the same express.
+- Of course there is, why else would I buy round-trip, I say; there's a discount.
+Anyway, he goes in and tries; I ask which seat numbers came up.
+- 4 and 5, he says. (The empty ones among the first six seats)
+- Can't you change them?
+- Unfortunately not.
+- Are you using the internet version, I can't help asking.
+- No, we log in as an agency, but there's no difference, comes the reply. (Presumably
+an extra commission just gets added on top.)
+- So who lets me choose a seat?
+- You can buy at the station, Haydarpaşa or Sirkeci.

    +

5- Our route: Sirkeci station. I get there by a bus and a tram.
+Once burned, twice shy: my first question is, I want tickets for the Fatih express, but can
+you choose seats?
+- Let's see, if there are empty places we can choose, says the clerk this time.
+- Ahh, finally.
+- I want 1 full fare and 1 student round-trip, plus 1 student one-way.
+- There is no round-trip discount for students, comes the reply.
+- I know; there is one for full fare, which is exactly why I'm saying it. (Info: a full-fare
+round-trip costs the same as a student ticket. Strange. If you buy round-trip, being a student is
+useless, so no need for a student card. Observation: students always travel
+one way.)
+- Credit card or cash?
+- DIINN! A credit card.. you do have one, right?
+- Yes, 112 TL
+- Here you go; zip, whirr, two clicks and a bit of chatter, and the tickets and POS slip are in my hand.

    +

Before leaving the counter I check the tickets: train, date, seats and so on, all correct?
+Walking away with the tickets in hand and a thank-you, I have managed just
+one purchase in an hour and a half.  And there is still the return leg to arrange.

    +

In short:
+Outbound: 18/06/2009 Thursday 23:30 Haydarpaşa Car: X Seats: XX-XX-XX
+Return: 20/06/2009 Saturday 23:30 Ankara Car: X Seats: XX-XX

    +

Safe travels.

    +

=====================
+Footnote 1: Apart from car 1, where I got these seats, 2 more cars still look completely
+empty. There are some sold seats in cars 2-3.

    +

Footnote 2: As someone who never got used to doing business by phone, I did not
+chase up whether reservations or sales are possible that way. A different adventure may be
+waiting for you there too, who knows?

    +

Footnote 3: Do you think you might get a chance to choose an upper or lower berth in the sleeping cars?


    +
    +
    + + + + + + +
    +
    + +
    +
    +
    +

16 June 2009

    + +
    + + +
    +
    +
    +

+ +IE, WTW and Food Aid +

    +
    +
    +
    +

These days hunger around the world, food aid and the news about them interest me more than ever, so Microsoft's new campaign caught my attention. To better publicize the new version of its Internet browser, Microsoft has started a campaign built on food aid: for every complete download of IE8 it will reportedly donate 8 meals. You can find the details here…

    +

Of course a pile of discussion came up around this; on TechCrunch, for example, there are plenty of posts and arguments about the campaign. For my part, I could not decide whether to download this browser, which does not run on Linux anyway, spending a bit of network time to trigger a donation; to encourage those already using IE to move from the buggy old versions to this new one, in which a pile of CSS and JS bugs have been fixed; or to say nothing at all. Then I decided to use the news as an excuse to write a bit more.

    +

Either download IE8, or visit the sites of the organizations below and learn what you can do to contribute to the fight against hunger and poverty around the world… Among these I particularly recommend taking a look at the United Nations World Food Programme's Walk The Web campaign…

    + +

Finally, as I have been recommending to everyone these days, I strongly suggest watching the documentary Yuva (Home).


    +
    +
    + + + + + + +
    +
    + +
    +
    +
    +

28 May 2009

    + +
    + + +
    +
    +
    +

+ +Free Software Panel at the TBD Informatics Congress +

    +
    +
    +
    +

At the İstanbul Bilişim Kongresi (Istanbul Informatics Congress), which TBD is organizing for the 3rd time this year, there will be a Free Software Panel on Sunday at 14:00. The panel will focus on free software and business models. Announced to all who are interested…

    +

Venue: Marmara Üniversitesi Nişantaşı Campus
+Erdal İnönü Science and Culture Center
+Date: Sunday 31 May, 14:00 - 15:20
+Session chair: Görkem Çetin
+Speakers: Enver Altın, Hakan Uygun, Cahit Cengizhan


    +
    +
    + + + + + + +
    +
    + +
    +
    +
    +

13 April 2009

    + +
    + + +
    +
    +
    +

+ +Sorting Algorithms +

    +
    +
    +
    +

Sorting algorithms are among the most fundamental things in any introduction to programming. They are splendid examples, especially for seeing how different methods for solving the same problem yield different results. Even better is a visual comparison of these different algorithms. I strongly recommend looking at this site, which does exactly that, and does it well. You will find not only a visual comparison of the different algorithms but also how each algorithm behaves on different data sets, along with detailed comparisons…
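That per-data-set behaviour is easy to reproduce by hand. Below is a minimal sketch (hypothetical, not part of this repository; plain Python 3) that times two classic algorithms on random versus almost-sorted input:

import random
import time

def insertion_sort(a):
    # Quadratic in general, but close to linear on almost-sorted input.
    a = list(a)
    for i in range(1, len(a)):
        key = a[i]
        j = i - 1
        while j >= 0 and a[j] > key:
            a[j + 1] = a[j]
            j -= 1
        a[j + 1] = key
    return a

def merge_sort(a):
    # O(n log n) whatever the input distribution looks like.
    if len(a) <= 1:
        return list(a)
    mid = len(a) // 2
    left, right = merge_sort(a[:mid]), merge_sort(a[mid:])
    merged, i, j = [], 0, 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    return merged + left[i:] + right[j:]

def bench(sort, data, label):
    # Time one run of `sort` on `data` and print a small report line.
    start = time.perf_counter()
    sort(data)
    print("%-15s %-14s %.4fs" % (sort.__name__, label, time.perf_counter() - start))

n = 4000
random_data = [random.randrange(n) for _ in range(n)]
almost_sorted = sorted(random_data)
# Perturb one pair so the list is "almost" rather than fully sorted.
almost_sorted[0], almost_sorted[-1] = almost_sorted[-1], almost_sorted[0]

for data, label in ((random_data, "random"), (almost_sorted, "almost-sorted")):
    for sort in (insertion_sort, merge_sort):
        bench(sort, data, label)

On the random list merge_sort wins comfortably; on the almost-sorted one insertion_sort pulls ahead, which is exactly the per-data-set behaviour the site visualizes.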


    +
    +
    + + + + + + +
    +
    + +
    +
    +
    +
    + + + + + + + + + + + + + + diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main/jquery.cookie.min.js b/DJAGEN/tags/djagen_old/djagen/templates/main/jquery.cookie.min.js new file mode 100755 index 0000000..aab4864 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/templates/main/jquery.cookie.min.js @@ -0,0 +1 @@ +jQuery.cookie=function(B,I,L){if(typeof I!="undefined"){L=L||{};if(I===null){I="";L.expires=-1}var E="";if(L.expires&&(typeof L.expires=="number"||L.expires.toUTCString)){var F;if(typeof L.expires=="number"){F=new Date();F.setTime(F.getTime()+(L.expires*24*60*60*1000))}else{F=L.expires}E="; expires="+F.toUTCString()}var K=L.path?"; path="+(L.path):"";var G=L.domain?"; domain="+(L.domain):"";var A=L.secure?"; secure":"";document.cookie=[B,"=",encodeURIComponent(I),E,K,G,A].join("")}else{var D=null;if(document.cookie&&document.cookie!=""){var J=document.cookie.split(";");for(var H=0;H)[^>]*$|^#(\w+)$/,isSimple=/^.[^:#\[\.]*$/,undefined;jQuery.fn=jQuery.prototype={init:function(selector,context){selector=selector||document;if(selector.nodeType){this[0]=selector;this.length=1;return this;}if(typeof selector=="string"){var match=quickExpr.exec(selector);if(match&&(match[1]||!context)){if(match[1])selector=jQuery.clean([match[1]],context);else{var elem=document.getElementById(match[3]);if(elem){if(elem.id!=match[3])return jQuery().find(selector);return jQuery(elem);}selector=[];}}else +return jQuery(context).find(selector);}else if(jQuery.isFunction(selector))return jQuery(document)[jQuery.fn.ready?"ready":"load"](selector);return this.setArray(jQuery.makeArray(selector));},jquery:"1.2.6",size:function(){return this.length;},length:0,get:function(num){return num==undefined?jQuery.makeArray(this):this[num];},pushStack:function(elems){var ret=jQuery(elems);ret.prevObject=this;return ret;},setArray:function(elems){this.length=0;Array.prototype.push.apply(this,elems);return this;},each:function(callback,args){return jQuery.each(this,callback,args);},index:function(elem){var ret=-1;return jQuery.inArray(elem&&elem.jquery?elem[0]:elem,this);},attr:function(name,value,type){var options=name;if(name.constructor==String)if(value===undefined)return this[0]&&jQuery[type||"attr"](this[0],name);else{options={};options[name]=value;}return this.each(function(i){for(name in options)jQuery.attr(type?this.style:this,name,jQuery.prop(this,options[name],type,i,name));});},css:function(key,value){if((key=='width'||key=='height')&&parseFloat(value)<0)value=undefined;return this.attr(key,value,"curCSS");},text:function(text){if(typeof text!="object"&&text!=null)return this.empty().append((this[0]&&this[0].ownerDocument||document).createTextNode(text));var ret="";jQuery.each(text||this,function(){jQuery.each(this.childNodes,function(){if(this.nodeType!=8)ret+=this.nodeType!=1?this.nodeValue:jQuery.fn.text([this]);});});return ret;},wrapAll:function(html){if(this[0])jQuery(html,this[0].ownerDocument).clone().insertBefore(this[0]).map(function(){var elem=this;while(elem.firstChild)elem=elem.firstChild;return elem;}).append(this);return this;},wrapInner:function(html){return this.each(function(){jQuery(this).contents().wrapAll(html);});},wrap:function(html){return this.each(function(){jQuery(this).wrapAll(html);});},append:function(){return this.domManip(arguments,true,false,function(elem){if(this.nodeType==1)this.appendChild(elem);});},prepend:function(){return this.domManip(arguments,true,true,function(elem){if(this.nodeType==1)this.insertBefore(elem,this.firstChild);});},before:function(){return 
this.domManip(arguments,false,false,function(elem){this.parentNode.insertBefore(elem,this);});},after:function(){return this.domManip(arguments,false,true,function(elem){this.parentNode.insertBefore(elem,this.nextSibling);});},end:function(){return this.prevObject||jQuery([]);},find:function(selector){var elems=jQuery.map(this,function(elem){return jQuery.find(selector,elem);});return this.pushStack(/[^+>] [^+>]/.test(selector)||selector.indexOf("..")>-1?jQuery.unique(elems):elems);},clone:function(events){var ret=this.map(function(){if(jQuery.browser.msie&&!jQuery.isXMLDoc(this)){var clone=this.cloneNode(true),container=document.createElement("div");container.appendChild(clone);return jQuery.clean([container.innerHTML])[0];}else +return this.cloneNode(true);});var clone=ret.find("*").andSelf().each(function(){if(this[expando]!=undefined)this[expando]=null;});if(events===true)this.find("*").andSelf().each(function(i){if(this.nodeType==3)return;var events=jQuery.data(this,"events");for(var type in events)for(var handler in events[type])jQuery.event.add(clone[i],type,events[type][handler],events[type][handler].data);});return ret;},filter:function(selector){return this.pushStack(jQuery.isFunction(selector)&&jQuery.grep(this,function(elem,i){return selector.call(elem,i);})||jQuery.multiFilter(selector,this));},not:function(selector){if(selector.constructor==String)if(isSimple.test(selector))return this.pushStack(jQuery.multiFilter(selector,this,true));else +selector=jQuery.multiFilter(selector,this);var isArrayLike=selector.length&&selector[selector.length-1]!==undefined&&!selector.nodeType;return this.filter(function(){return isArrayLike?jQuery.inArray(this,selector)<0:this!=selector;});},add:function(selector){return this.pushStack(jQuery.unique(jQuery.merge(this.get(),typeof selector=='string'?jQuery(selector):jQuery.makeArray(selector))));},is:function(selector){return!!selector&&jQuery.multiFilter(selector,this).length>0;},hasClass:function(selector){return this.is("."+selector);},val:function(value){if(value==undefined){if(this.length){var elem=this[0];if(jQuery.nodeName(elem,"select")){var index=elem.selectedIndex,values=[],options=elem.options,one=elem.type=="select-one";if(index<0)return null;for(var i=one?index:0,max=one?index+1:options.length;i=0||jQuery.inArray(this.name,value)>=0);else if(jQuery.nodeName(this,"select")){var values=jQuery.makeArray(value);jQuery("option",this).each(function(){this.selected=(jQuery.inArray(this.value,values)>=0||jQuery.inArray(this.text,values)>=0);});if(!values.length)this.selectedIndex=-1;}else +this.value=value;});},html:function(value){return value==undefined?(this[0]?this[0].innerHTML:null):this.empty().append(value);},replaceWith:function(value){return this.after(value).remove();},eq:function(i){return this.slice(i,i+1);},slice:function(){return this.pushStack(Array.prototype.slice.apply(this,arguments));},map:function(callback){return this.pushStack(jQuery.map(this,function(elem,i){return callback.call(elem,i,elem);}));},andSelf:function(){return this.add(this.prevObject);},data:function(key,value){var parts=key.split(".");parts[1]=parts[1]?"."+parts[1]:"";if(value===undefined){var data=this.triggerHandler("getData"+parts[1]+"!",[parts[0]]);if(data===undefined&&this.length)data=jQuery.data(this[0],key);return data===undefined&&parts[1]?this.data(parts[0]):data;}else +return this.trigger("setData"+parts[1]+"!",[parts[0],value]).each(function(){jQuery.data(this,key,value);});},removeData:function(key){return 
this.each(function(){jQuery.removeData(this,key);});},domManip:function(args,table,reverse,callback){var clone=this.length>1,elems;return this.each(function(){if(!elems){elems=jQuery.clean(args,this.ownerDocument);if(reverse)elems.reverse();}var obj=this;if(table&&jQuery.nodeName(this,"table")&&jQuery.nodeName(elems[0],"tr"))obj=this.getElementsByTagName("tbody")[0]||this.appendChild(this.ownerDocument.createElement("tbody"));var scripts=jQuery([]);jQuery.each(elems,function(){var elem=clone?jQuery(this).clone(true)[0]:this;if(jQuery.nodeName(elem,"script"))scripts=scripts.add(elem);else{if(elem.nodeType==1)scripts=scripts.add(jQuery("script",elem).remove());callback.call(obj,elem);}});scripts.each(evalScript);});}};jQuery.fn.init.prototype=jQuery.fn;function evalScript(i,elem){if(elem.src)jQuery.ajax({url:elem.src,async:false,dataType:"script"});else +jQuery.globalEval(elem.text||elem.textContent||elem.innerHTML||"");if(elem.parentNode)elem.parentNode.removeChild(elem);}function now(){return+new Date;}jQuery.extend=jQuery.fn.extend=function(){var target=arguments[0]||{},i=1,length=arguments.length,deep=false,options;if(target.constructor==Boolean){deep=target;target=arguments[1]||{};i=2;}if(typeof target!="object"&&typeof target!="function")target={};if(length==i){target=this;--i;}for(;i-1;}},swap:function(elem,options,callback){var old={};for(var name in options){old[name]=elem.style[name];elem.style[name]=options[name];}callback.call(elem);for(var name in options)elem.style[name]=old[name];},css:function(elem,name,force){if(name=="width"||name=="height"){var val,props={position:"absolute",visibility:"hidden",display:"block"},which=name=="width"?["Left","Right"]:["Top","Bottom"];function getWH(){val=name=="width"?elem.offsetWidth:elem.offsetHeight;var padding=0,border=0;jQuery.each(which,function(){padding+=parseFloat(jQuery.curCSS(elem,"padding"+this,true))||0;border+=parseFloat(jQuery.curCSS(elem,"border"+this+"Width",true))||0;});val-=Math.round(padding+border);}if(jQuery(elem).is(":visible"))getWH();else +jQuery.swap(elem,props,getWH);return Math.max(0,val);}return jQuery.curCSS(elem,name,force);},curCSS:function(elem,name,force){var ret,style=elem.style;function color(elem){if(!jQuery.browser.safari)return false;var ret=defaultView.getComputedStyle(elem,null);return!ret||ret.getPropertyValue("color")=="";}if(name=="opacity"&&jQuery.browser.msie){ret=jQuery.attr(style,"opacity");return ret==""?"1":ret;}if(jQuery.browser.opera&&name=="display"){var save=style.outline;style.outline="0 solid black";style.outline=save;}if(name.match(/float/i))name=styleFloat;if(!force&&style&&style[name])ret=style[name];else if(defaultView.getComputedStyle){if(name.match(/float/i))name="float";name=name.replace(/([A-Z])/g,"-$1").toLowerCase();var computedStyle=defaultView.getComputedStyle(elem,null);if(computedStyle&&!color(elem))ret=computedStyle.getPropertyValue(name);else{var swap=[],stack=[],a=elem,i=0;for(;a&&color(a);a=a.parentNode)stack.unshift(a);for(;i]*?)\/>/g,function(all,front,tag){return tag.match(/^(abbr|br|col|img|input|link|meta|param|hr|area|embed)$/i)?all:front+">";});var tags=jQuery.trim(elem).toLowerCase(),div=context.createElement("div");var wrap=!tags.indexOf("",""]||!tags.indexOf("",""]||tags.match(/^<(thead|tbody|tfoot|colg|cap)/)&&[1,"","
    "]||!tags.indexOf("",""]||(!tags.indexOf("",""]||!tags.indexOf("",""]||jQuery.browser.msie&&[1,"div
    ","
    "]||[0,"",""];div.innerHTML=wrap[1]+elem+wrap[2];while(wrap[0]--)div=div.lastChild;if(jQuery.browser.msie){var tbody=!tags.indexOf(""&&tags.indexOf("=0;--j)if(jQuery.nodeName(tbody[j],"tbody")&&!tbody[j].childNodes.length)tbody[j].parentNode.removeChild(tbody[j]);if(/^\s/.test(elem))div.insertBefore(context.createTextNode(elem.match(/^\s*/)[0]),div.firstChild);}elem=jQuery.makeArray(div.childNodes);}if(elem.length===0&&(!jQuery.nodeName(elem,"form")&&!jQuery.nodeName(elem,"select")))return;if(elem[0]==undefined||jQuery.nodeName(elem,"form")||elem.options)ret.push(elem);else +ret=jQuery.merge(ret,elem);});return ret;},attr:function(elem,name,value){if(!elem||elem.nodeType==3||elem.nodeType==8)return undefined;var notxml=!jQuery.isXMLDoc(elem),set=value!==undefined,msie=jQuery.browser.msie;name=notxml&&jQuery.props[name]||name;if(elem.tagName){var special=/href|src|style/.test(name);if(name=="selected"&&jQuery.browser.safari)elem.parentNode.selectedIndex;if(name in elem&¬xml&&!special){if(set){if(name=="type"&&jQuery.nodeName(elem,"input")&&elem.parentNode)throw"type property can't be changed";elem[name]=value;}if(jQuery.nodeName(elem,"form")&&elem.getAttributeNode(name))return elem.getAttributeNode(name).nodeValue;return elem[name];}if(msie&¬xml&&name=="style")return jQuery.attr(elem.style,"cssText",value);if(set)elem.setAttribute(name,""+value);var attr=msie&¬xml&&special?elem.getAttribute(name,2):elem.getAttribute(name);return attr===null?undefined:attr;}if(msie&&name=="opacity"){if(set){elem.zoom=1;elem.filter=(elem.filter||"").replace(/alpha\([^)]*\)/,"")+(parseInt(value)+''=="NaN"?"":"alpha(opacity="+value*100+")");}return elem.filter&&elem.filter.indexOf("opacity=")>=0?(parseFloat(elem.filter.match(/opacity=([^)]*)/)[1])/100)+'':"";}name=name.replace(/-([a-z])/ig,function(all,letter){return letter.toUpperCase();});if(set)elem[name]=value;return elem[name];},trim:function(text){return(text||"").replace(/^\s+|\s+$/g,"");},makeArray:function(array){var ret=[];if(array!=null){var i=array.length;if(i==null||array.split||array.setInterval||array.call)ret[0]=array;else +while(i)ret[--i]=array[i];}return ret;},inArray:function(elem,array){for(var i=0,length=array.length;i*",this).remove();while(this.firstChild)this.removeChild(this.firstChild);}},function(name,fn){jQuery.fn[name]=function(){return this.each(fn,arguments);};});jQuery.each(["Height","Width"],function(i,name){var type=name.toLowerCase();jQuery.fn[type]=function(size){return this[0]==window?jQuery.browser.opera&&document.body["client"+name]||jQuery.browser.safari&&window["inner"+name]||document.compatMode=="CSS1Compat"&&document.documentElement["client"+name]||document.body["client"+name]:this[0]==document?Math.max(Math.max(document.body["scroll"+name],document.documentElement["scroll"+name]),Math.max(document.body["offset"+name],document.documentElement["offset"+name])):size==undefined?(this.length?jQuery.css(this[0],type):null):this.css(type,size.constructor==String?size:size+"px");};});function num(elem,prop){return elem[0]&&parseInt(jQuery.curCSS(elem[0],prop,true),10)||0;}var chars=jQuery.browser.safari&&parseInt(jQuery.browser.version)<417?"(?:[\\w*_-]|\\\\.)":"(?:[\\w\u0128-\uFFFF*_-]|\\\\.)",quickChild=new RegExp("^>\\s*("+chars+"+)"),quickID=new RegExp("^("+chars+"+)(#)("+chars+"+)"),quickClass=new RegExp("^([#.]?)("+chars+"*)");jQuery.extend({expr:{"":function(a,i,m){return m[2]=="*"||jQuery.nodeName(a,m[2]);},"#":function(a,i,m){return a.getAttribute("id")==m[2];},":":{lt:function(a,i,m){return 
im[3]-0;},nth:function(a,i,m){return m[3]-0==i;},eq:function(a,i,m){return m[3]-0==i;},first:function(a,i){return i==0;},last:function(a,i,m,r){return i==r.length-1;},even:function(a,i){return i%2==0;},odd:function(a,i){return i%2;},"first-child":function(a){return a.parentNode.getElementsByTagName("*")[0]==a;},"last-child":function(a){return jQuery.nth(a.parentNode.lastChild,1,"previousSibling")==a;},"only-child":function(a){return!jQuery.nth(a.parentNode.lastChild,2,"previousSibling");},parent:function(a){return a.firstChild;},empty:function(a){return!a.firstChild;},contains:function(a,i,m){return(a.textContent||a.innerText||jQuery(a).text()||"").indexOf(m[3])>=0;},visible:function(a){return"hidden"!=a.type&&jQuery.css(a,"display")!="none"&&jQuery.css(a,"visibility")!="hidden";},hidden:function(a){return"hidden"==a.type||jQuery.css(a,"display")=="none"||jQuery.css(a,"visibility")=="hidden";},enabled:function(a){return!a.disabled;},disabled:function(a){return a.disabled;},checked:function(a){return a.checked;},selected:function(a){return a.selected||jQuery.attr(a,"selected");},text:function(a){return"text"==a.type;},radio:function(a){return"radio"==a.type;},checkbox:function(a){return"checkbox"==a.type;},file:function(a){return"file"==a.type;},password:function(a){return"password"==a.type;},submit:function(a){return"submit"==a.type;},image:function(a){return"image"==a.type;},reset:function(a){return"reset"==a.type;},button:function(a){return"button"==a.type||jQuery.nodeName(a,"button");},input:function(a){return/input|select|textarea|button/i.test(a.nodeName);},has:function(a,i,m){return jQuery.find(m[3],a).length;},header:function(a){return/h\d/i.test(a.nodeName);},animated:function(a){return jQuery.grep(jQuery.timers,function(fn){return a==fn.elem;}).length;}}},parse:[/^(\[) *@?([\w-]+) *([!*$^~=]*) *('?"?)(.*?)\4 *\]/,/^(:)([\w-]+)\("?'?(.*?(\(.*?\))?[^(]*?)"?'?\)/,new RegExp("^([:.#]*)("+chars+"+)")],multiFilter:function(expr,elems,not){var old,cur=[];while(expr&&expr!=old){old=expr;var f=jQuery.filter(expr,elems,not);expr=f.t.replace(/^\s*,\s*/,"");cur=not?elems=f.r:jQuery.merge(cur,f.r);}return cur;},find:function(t,context){if(typeof t!="string")return[t];if(context&&context.nodeType!=1&&context.nodeType!=9)return[];context=context||document;var ret=[context],done=[],last,nodeName;while(t&&last!=t){var r=[];last=t;t=jQuery.trim(t);var foundToken=false,re=quickChild,m=re.exec(t);if(m){nodeName=m[1].toUpperCase();for(var i=0;ret[i];i++)for(var c=ret[i].firstChild;c;c=c.nextSibling)if(c.nodeType==1&&(nodeName=="*"||c.nodeName.toUpperCase()==nodeName))r.push(c);ret=r;t=t.replace(re,"");if(t.indexOf(" ")==0)continue;foundToken=true;}else{re=/^([>+~])\s*(\w*)/i;if((m=re.exec(t))!=null){r=[];var merge={};nodeName=m[2].toUpperCase();m=m[1];for(var j=0,rl=ret.length;j=0;if(!not&&pass||not&&!pass)tmp.push(r[i]);}return tmp;},filter:function(t,r,not){var last;while(t&&t!=last){last=t;var p=jQuery.parse,m;for(var i=0;p[i];i++){m=p[i].exec(t);if(m){t=t.substring(m[0].length);m[2]=m[2].replace(/\\/g,"");break;}}if(!m)break;if(m[1]==":"&&m[2]=="not")r=isSimple.test(m[3])?jQuery.filter(m[3],r,true).r:jQuery(r).not(m[3]);else if(m[1]==".")r=jQuery.classFilter(r,m[2],not);else if(m[1]=="["){var tmp=[],type=m[3];for(var i=0,rl=r.length;i=0)^not)tmp.push(a);}r=tmp;}else if(m[1]==":"&&m[2]=="nth-child"){var merge={},tmp=[],test=/(-?)(\d*)n((?:\+|-)?\d*)/.exec(m[3]=="even"&&"2n"||m[3]=="odd"&&"2n+1"||!/\D/.test(m[3])&&"0n+"+m[3]||m[3]),first=(test[1]+(test[2]||1))-0,last=test[3]-0;for(var 
i=0,rl=r.length;i=0)add=true;if(add^not)tmp.push(node);}r=tmp;}else{var fn=jQuery.expr[m[1]];if(typeof fn=="object")fn=fn[m[2]];if(typeof fn=="string")fn=eval("false||function(a,i){return "+fn+";}");r=jQuery.grep(r,function(elem,i){return fn(elem,i,m,r);},not);}}return{r:r,t:t};},dir:function(elem,dir){var matched=[],cur=elem[dir];while(cur&&cur!=document){if(cur.nodeType==1)matched.push(cur);cur=cur[dir];}return matched;},nth:function(cur,result,dir,elem){result=result||1;var num=0;for(;cur;cur=cur[dir])if(cur.nodeType==1&&++num==result)break;return cur;},sibling:function(n,elem){var r=[];for(;n;n=n.nextSibling){if(n.nodeType==1&&n!=elem)r.push(n);}return r;}});jQuery.event={add:function(elem,types,handler,data){if(elem.nodeType==3||elem.nodeType==8)return;if(jQuery.browser.msie&&elem.setInterval)elem=window;if(!handler.guid)handler.guid=this.guid++;if(data!=undefined){var fn=handler;handler=this.proxy(fn,function(){return fn.apply(this,arguments);});handler.data=data;}var events=jQuery.data(elem,"events")||jQuery.data(elem,"events",{}),handle=jQuery.data(elem,"handle")||jQuery.data(elem,"handle",function(){if(typeof jQuery!="undefined"&&!jQuery.event.triggered)return jQuery.event.handle.apply(arguments.callee.elem,arguments);});handle.elem=elem;jQuery.each(types.split(/\s+/),function(index,type){var parts=type.split(".");type=parts[0];handler.type=parts[1];var handlers=events[type];if(!handlers){handlers=events[type]={};if(!jQuery.event.special[type]||jQuery.event.special[type].setup.call(elem)===false){if(elem.addEventListener)elem.addEventListener(type,handle,false);else if(elem.attachEvent)elem.attachEvent("on"+type,handle);}}handlers[handler.guid]=handler;jQuery.event.global[type]=true;});elem=null;},guid:1,global:{},remove:function(elem,types,handler){if(elem.nodeType==3||elem.nodeType==8)return;var events=jQuery.data(elem,"events"),ret,index;if(events){if(types==undefined||(typeof types=="string"&&types.charAt(0)=="."))for(var type in events)this.remove(elem,type+(types||""));else{if(types.type){handler=types.handler;types=types.type;}jQuery.each(types.split(/\s+/),function(index,type){var parts=type.split(".");type=parts[0];if(events[type]){if(handler)delete events[type][handler.guid];else +for(handler in events[type])if(!parts[1]||events[type][handler].type==parts[1])delete events[type][handler];for(ret in events[type])break;if(!ret){if(!jQuery.event.special[type]||jQuery.event.special[type].teardown.call(elem)===false){if(elem.removeEventListener)elem.removeEventListener(type,jQuery.data(elem,"handle"),false);else if(elem.detachEvent)elem.detachEvent("on"+type,jQuery.data(elem,"handle"));}ret=null;delete events[type];}}});}for(ret in events)break;if(!ret){var handle=jQuery.data(elem,"handle");if(handle)handle.elem=null;jQuery.removeData(elem,"events");jQuery.removeData(elem,"handle");}}},trigger:function(type,data,elem,donative,extra){data=jQuery.makeArray(data);if(type.indexOf("!")>=0){type=type.slice(0,-1);var exclusive=true;}if(!elem){if(this.global[type])jQuery("*").add([window,document]).trigger(type,data);}else{if(elem.nodeType==3||elem.nodeType==8)return undefined;var val,ret,fn=jQuery.isFunction(elem[type]||null),event=!data[0]||!data[0].preventDefault;if(event){data.unshift({type:type,target:elem,preventDefault:function(){},stopPropagation:function(){},timeStamp:now()});data[0][expando]=true;}data[0].type=type;if(exclusive)data[0].exclusive=true;var 
handle=jQuery.data(elem,"handle");if(handle)val=handle.apply(elem,data);if((!fn||(jQuery.nodeName(elem,'a')&&type=="click"))&&elem["on"+type]&&elem["on"+type].apply(elem,data)===false)val=false;if(event)data.shift();if(extra&&jQuery.isFunction(extra)){ret=extra.apply(elem,val==null?data:data.concat(val));if(ret!==undefined)val=ret;}if(fn&&donative!==false&&val!==false&&!(jQuery.nodeName(elem,'a')&&type=="click")){this.triggered=true;try{elem[type]();}catch(e){}}this.triggered=false;}return val;},handle:function(event){var val,ret,namespace,all,handlers;event=arguments[0]=jQuery.event.fix(event||window.event);namespace=event.type.split(".");event.type=namespace[0];namespace=namespace[1];all=!namespace&&!event.exclusive;handlers=(jQuery.data(this,"events")||{})[event.type];for(var j in handlers){var handler=handlers[j];if(all||handler.type==namespace){event.handler=handler;event.data=handler.data;ret=handler.apply(this,arguments);if(val!==false)val=ret;if(ret===false){event.preventDefault();event.stopPropagation();}}}return val;},fix:function(event){if(event[expando]==true)return event;var originalEvent=event;event={originalEvent:originalEvent};var props="altKey attrChange attrName bubbles button cancelable charCode clientX clientY ctrlKey currentTarget data detail eventPhase fromElement handler keyCode metaKey newValue originalTarget pageX pageY prevValue relatedNode relatedTarget screenX screenY shiftKey srcElement target timeStamp toElement type view wheelDelta which".split(" ");for(var i=props.length;i;i--)event[props[i]]=originalEvent[props[i]];event[expando]=true;event.preventDefault=function(){if(originalEvent.preventDefault)originalEvent.preventDefault();originalEvent.returnValue=false;};event.stopPropagation=function(){if(originalEvent.stopPropagation)originalEvent.stopPropagation();originalEvent.cancelBubble=true;};event.timeStamp=event.timeStamp||now();if(!event.target)event.target=event.srcElement||document;if(event.target.nodeType==3)event.target=event.target.parentNode;if(!event.relatedTarget&&event.fromElement)event.relatedTarget=event.fromElement==event.target?event.toElement:event.fromElement;if(event.pageX==null&&event.clientX!=null){var doc=document.documentElement,body=document.body;event.pageX=event.clientX+(doc&&doc.scrollLeft||body&&body.scrollLeft||0)-(doc.clientLeft||0);event.pageY=event.clientY+(doc&&doc.scrollTop||body&&body.scrollTop||0)-(doc.clientTop||0);}if(!event.which&&((event.charCode||event.charCode===0)?event.charCode:event.keyCode))event.which=event.charCode||event.keyCode;if(!event.metaKey&&event.ctrlKey)event.metaKey=event.ctrlKey;if(!event.which&&event.button)event.which=(event.button&1?1:(event.button&2?3:(event.button&4?2:0)));return event;},proxy:function(fn,proxy){proxy.guid=fn.guid=fn.guid||proxy.guid||this.guid++;return proxy;},special:{ready:{setup:function(){bindReady();return;},teardown:function(){return;}},mouseenter:{setup:function(){if(jQuery.browser.msie)return false;jQuery(this).bind("mouseover",jQuery.event.special.mouseenter.handler);return true;},teardown:function(){if(jQuery.browser.msie)return false;jQuery(this).unbind("mouseover",jQuery.event.special.mouseenter.handler);return true;},handler:function(event){if(withinElement(event,this))return true;event.type="mouseenter";return jQuery.event.handle.apply(this,arguments);}},mouseleave:{setup:function(){if(jQuery.browser.msie)return false;jQuery(this).bind("mouseout",jQuery.event.special.mouseleave.handler);return true;},teardown:function(){if(jQuery.browser.msie)return 
false;jQuery(this).unbind("mouseout",jQuery.event.special.mouseleave.handler);return true;},handler:function(event){if(withinElement(event,this))return true;event.type="mouseleave";return jQuery.event.handle.apply(this,arguments);}}}};jQuery.fn.extend({bind:function(type,data,fn){return type=="unload"?this.one(type,data,fn):this.each(function(){jQuery.event.add(this,type,fn||data,fn&&data);});},one:function(type,data,fn){var one=jQuery.event.proxy(fn||data,function(event){jQuery(this).unbind(event,one);return(fn||data).apply(this,arguments);});return this.each(function(){jQuery.event.add(this,type,one,fn&&data);});},unbind:function(type,fn){return this.each(function(){jQuery.event.remove(this,type,fn);});},trigger:function(type,data,fn){return this.each(function(){jQuery.event.trigger(type,data,this,true,fn);});},triggerHandler:function(type,data,fn){return this[0]&&jQuery.event.trigger(type,data,this[0],false,fn);},toggle:function(fn){var args=arguments,i=1;while(i=0){var selector=url.slice(off,url.length);url=url.slice(0,off);}callback=callback||function(){};var type="GET";if(params)if(jQuery.isFunction(params)){callback=params;params=null;}else{params=jQuery.param(params);type="POST";}var self=this;jQuery.ajax({url:url,type:type,dataType:"html",data:params,complete:function(res,status){if(status=="success"||status=="notmodified")self.html(selector?jQuery("
    ").append(res.responseText.replace(//g,"")).find(selector):res.responseText);self.each(callback,[res.responseText,status,res]);}});return this;},serialize:function(){return jQuery.param(this.serializeArray());},serializeArray:function(){return this.map(function(){return jQuery.nodeName(this,"form")?jQuery.makeArray(this.elements):this;}).filter(function(){return this.name&&!this.disabled&&(this.checked||/select|textarea/i.test(this.nodeName)||/text|hidden|password/i.test(this.type));}).map(function(i,elem){var val=jQuery(this).val();return val==null?null:val.constructor==Array?jQuery.map(val,function(val,i){return{name:elem.name,value:val};}):{name:elem.name,value:val};}).get();}});jQuery.each("ajaxStart,ajaxStop,ajaxComplete,ajaxError,ajaxSuccess,ajaxSend".split(","),function(i,o){jQuery.fn[o]=function(f){return this.bind(o,f);};});var jsc=now();jQuery.extend({get:function(url,data,callback,type){if(jQuery.isFunction(data)){callback=data;data=null;}return jQuery.ajax({type:"GET",url:url,data:data,success:callback,dataType:type});},getScript:function(url,callback){return jQuery.get(url,null,callback,"script");},getJSON:function(url,data,callback){return jQuery.get(url,data,callback,"json");},post:function(url,data,callback,type){if(jQuery.isFunction(data)){callback=data;data={};}return jQuery.ajax({type:"POST",url:url,data:data,success:callback,dataType:type});},ajaxSetup:function(settings){jQuery.extend(jQuery.ajaxSettings,settings);},ajaxSettings:{url:location.href,global:true,type:"GET",timeout:0,contentType:"application/x-www-form-urlencoded",processData:true,async:true,data:null,username:null,password:null,accepts:{xml:"application/xml, text/xml",html:"text/html",script:"text/javascript, application/javascript",json:"application/json, text/javascript",text:"text/plain",_default:"*/*"}},lastModified:{},ajax:function(s){s=jQuery.extend(true,s,jQuery.extend(true,{},jQuery.ajaxSettings,s));var jsonp,jsre=/=\?(&|$)/g,status,data,type=s.type.toUpperCase();if(s.data&&s.processData&&typeof s.data!="string")s.data=jQuery.param(s.data);if(s.dataType=="jsonp"){if(type=="GET"){if(!s.url.match(jsre))s.url+=(s.url.match(/\?/)?"&":"?")+(s.jsonp||"callback")+"=?";}else if(!s.data||!s.data.match(jsre))s.data=(s.data?s.data+"&":"")+(s.jsonp||"callback")+"=?";s.dataType="json";}if(s.dataType=="json"&&(s.data&&s.data.match(jsre)||s.url.match(jsre))){jsonp="jsonp"+jsc++;if(s.data)s.data=(s.data+"").replace(jsre,"="+jsonp+"$1");s.url=s.url.replace(jsre,"="+jsonp+"$1");s.dataType="script";window[jsonp]=function(tmp){data=tmp;success();complete();window[jsonp]=undefined;try{delete window[jsonp];}catch(e){}if(head)head.removeChild(script);};}if(s.dataType=="script"&&s.cache==null)s.cache=false;if(s.cache===false&&type=="GET"){var ts=now();var ret=s.url.replace(/(\?|&)_=.*?(&|$)/,"$1_="+ts+"$2");s.url=ret+((ret==s.url)?(s.url.match(/\?/)?"&":"?")+"_="+ts:"");}if(s.data&&type=="GET"){s.url+=(s.url.match(/\?/)?"&":"?")+s.data;s.data=null;}if(s.global&&!jQuery.active++)jQuery.event.trigger("ajaxStart");var remote=/^(?:\w+:)?\/\/([^\/?#]+)/;if(s.dataType=="script"&&type=="GET"&&remote.test(s.url)&&remote.exec(s.url)[1]!=location.host){var head=document.getElementsByTagName("head")[0];var script=document.createElement("script");script.src=s.url;if(s.scriptCharset)script.charset=s.scriptCharset;if(!jsonp){var 
done=false;script.onload=script.onreadystatechange=function(){if(!done&&(!this.readyState||this.readyState=="loaded"||this.readyState=="complete")){done=true;success();complete();head.removeChild(script);}};}head.appendChild(script);return undefined;}var requestDone=false;var xhr=window.ActiveXObject?new ActiveXObject("Microsoft.XMLHTTP"):new XMLHttpRequest();if(s.username)xhr.open(type,s.url,s.async,s.username,s.password);else +xhr.open(type,s.url,s.async);try{if(s.data)xhr.setRequestHeader("Content-Type",s.contentType);if(s.ifModified)xhr.setRequestHeader("If-Modified-Since",jQuery.lastModified[s.url]||"Thu, 01 Jan 1970 00:00:00 GMT");xhr.setRequestHeader("X-Requested-With","XMLHttpRequest");xhr.setRequestHeader("Accept",s.dataType&&s.accepts[s.dataType]?s.accepts[s.dataType]+", */*":s.accepts._default);}catch(e){}if(s.beforeSend&&s.beforeSend(xhr,s)===false){s.global&&jQuery.active--;xhr.abort();return false;}if(s.global)jQuery.event.trigger("ajaxSend",[xhr,s]);var onreadystatechange=function(isTimeout){if(!requestDone&&xhr&&(xhr.readyState==4||isTimeout=="timeout")){requestDone=true;if(ival){clearInterval(ival);ival=null;}status=isTimeout=="timeout"&&"timeout"||!jQuery.httpSuccess(xhr)&&"error"||s.ifModified&&jQuery.httpNotModified(xhr,s.url)&&"notmodified"||"success";if(status=="success"){try{data=jQuery.httpData(xhr,s.dataType,s.dataFilter);}catch(e){status="parsererror";}}if(status=="success"){var modRes;try{modRes=xhr.getResponseHeader("Last-Modified");}catch(e){}if(s.ifModified&&modRes)jQuery.lastModified[s.url]=modRes;if(!jsonp)success();}else +jQuery.handleError(s,xhr,status);complete();if(s.async)xhr=null;}};if(s.async){var ival=setInterval(onreadystatechange,13);if(s.timeout>0)setTimeout(function(){if(xhr){xhr.abort();if(!requestDone)onreadystatechange("timeout");}},s.timeout);}try{xhr.send(s.data);}catch(e){jQuery.handleError(s,xhr,null,e);}if(!s.async)onreadystatechange();function success(){if(s.success)s.success(data,status);if(s.global)jQuery.event.trigger("ajaxSuccess",[xhr,s]);}function complete(){if(s.complete)s.complete(xhr,status);if(s.global)jQuery.event.trigger("ajaxComplete",[xhr,s]);if(s.global&&!--jQuery.active)jQuery.event.trigger("ajaxStop");}return xhr;},handleError:function(s,xhr,status,e){if(s.error)s.error(xhr,status,e);if(s.global)jQuery.event.trigger("ajaxError",[xhr,s,e]);},active:0,httpSuccess:function(xhr){try{return!xhr.status&&location.protocol=="file:"||(xhr.status>=200&&xhr.status<300)||xhr.status==304||xhr.status==1223||jQuery.browser.safari&&xhr.status==undefined;}catch(e){}return false;},httpNotModified:function(xhr,url){try{var xhrRes=xhr.getResponseHeader("Last-Modified");return xhr.status==304||xhrRes==jQuery.lastModified[url]||jQuery.browser.safari&&xhr.status==undefined;}catch(e){}return false;},httpData:function(xhr,type,filter){var ct=xhr.getResponseHeader("content-type"),xml=type=="xml"||!type&&ct&&ct.indexOf("xml")>=0,data=xml?xhr.responseXML:xhr.responseText;if(xml&&data.documentElement.tagName=="parsererror")throw"parsererror";if(filter)data=filter(data,type);if(type=="script")jQuery.globalEval(data);if(type=="json")data=eval("("+data+")");return data;},param:function(a){var s=[];if(a.constructor==Array||a.jquery)jQuery.each(a,function(){s.push(encodeURIComponent(this.name)+"="+encodeURIComponent(this.value));});else +for(var j in a)if(a[j]&&a[j].constructor==Array)jQuery.each(a[j],function(){s.push(encodeURIComponent(j)+"="+encodeURIComponent(this));});else 
+s.push(encodeURIComponent(j)+"="+encodeURIComponent(jQuery.isFunction(a[j])?a[j]():a[j]));return s.join("&").replace(/%20/g,"+");}});jQuery.fn.extend({show:function(speed,callback){return speed?this.animate({height:"show",width:"show",opacity:"show"},speed,callback):this.filter(":hidden").each(function(){this.style.display=this.oldblock||"";if(jQuery.css(this,"display")=="none"){var elem=jQuery("<"+this.tagName+" />").appendTo("body");this.style.display=elem.css("display");if(this.style.display=="none")this.style.display="block";elem.remove();}}).end();},hide:function(speed,callback){return speed?this.animate({height:"hide",width:"hide",opacity:"hide"},speed,callback):this.filter(":visible").each(function(){this.oldblock=this.oldblock||jQuery.css(this,"display");this.style.display="none";}).end();},_toggle:jQuery.fn.toggle,toggle:function(fn,fn2){return jQuery.isFunction(fn)&&jQuery.isFunction(fn2)?this._toggle.apply(this,arguments):fn?this.animate({height:"toggle",width:"toggle",opacity:"toggle"},fn,fn2):this.each(function(){jQuery(this)[jQuery(this).is(":hidden")?"show":"hide"]();});},slideDown:function(speed,callback){return this.animate({height:"show"},speed,callback);},slideUp:function(speed,callback){return this.animate({height:"hide"},speed,callback);},slideToggle:function(speed,callback){return this.animate({height:"toggle"},speed,callback);},fadeIn:function(speed,callback){return this.animate({opacity:"show"},speed,callback);},fadeOut:function(speed,callback){return this.animate({opacity:"hide"},speed,callback);},fadeTo:function(speed,to,callback){return this.animate({opacity:to},speed,callback);},animate:function(prop,speed,easing,callback){var optall=jQuery.speed(speed,easing,callback);return this[optall.queue===false?"each":"queue"](function(){if(this.nodeType!=1)return false;var opt=jQuery.extend({},optall),p,hidden=jQuery(this).is(":hidden"),self=this;for(p in prop){if(prop[p]=="hide"&&hidden||prop[p]=="show"&&!hidden)return opt.complete.call(this);if(p=="height"||p=="width"){opt.display=jQuery.css(this,"display");opt.overflow=this.style.overflow;}}if(opt.overflow!=null)this.style.overflow="hidden";opt.curAnim=jQuery.extend({},prop);jQuery.each(prop,function(name,val){var e=new jQuery.fx(self,opt,name);if(/toggle|show|hide/.test(val))e[val=="toggle"?hidden?"show":"hide":val](prop);else{var parts=val.toString().match(/^([+-]=)?([\d+-.]+)(.*)$/),start=e.cur(true)||0;if(parts){var end=parseFloat(parts[2]),unit=parts[3]||"px";if(unit!="px"){self.style[name]=(end||1)+unit;start=((end||1)/e.cur(true))*start;self.style[name]=start+unit;}if(parts[1])end=((parts[1]=="-="?-1:1)*end)+start;e.custom(start,end,unit);}else +e.custom(start,val,"");}});return true;});},queue:function(type,fn){if(jQuery.isFunction(type)||(type&&type.constructor==Array)){fn=type;type="fx";}if(!type||(typeof type=="string"&&!fn))return queue(this[0],type);return this.each(function(){if(fn.constructor==Array)queue(this,type,fn);else{queue(this,type).push(fn);if(queue(this,type).length==1)fn.call(this);}});},stop:function(clearQueue,gotoEnd){var timers=jQuery.timers;if(clearQueue)this.queue([]);this.each(function(){for(var i=timers.length-1;i>=0;i--)if(timers[i].elem==this){if(gotoEnd)timers[i](true);timers.splice(i,1);}});if(!gotoEnd)this.dequeue();return this;}});var queue=function(elem,type,array){if(elem){type=type||"fx";var q=jQuery.data(elem,type+"queue");if(!q||array)q=jQuery.data(elem,type+"queue",jQuery.makeArray(array));}return q;};jQuery.fn.dequeue=function(type){type=type||"fx";return 
this.each(function(){var q=queue(this,type);q.shift();if(q.length)q[0].call(this);});};jQuery.extend({speed:function(speed,easing,fn){var opt=speed&&speed.constructor==Object?speed:{complete:fn||!fn&&easing||jQuery.isFunction(speed)&&speed,duration:speed,easing:fn&&easing||easing&&easing.constructor!=Function&&easing};opt.duration=(opt.duration&&opt.duration.constructor==Number?opt.duration:jQuery.fx.speeds[opt.duration])||jQuery.fx.speeds.def;opt.old=opt.complete;opt.complete=function(){if(opt.queue!==false)jQuery(this).dequeue();if(jQuery.isFunction(opt.old))opt.old.call(this);};return opt;},easing:{linear:function(p,n,firstNum,diff){return firstNum+diff*p;},swing:function(p,n,firstNum,diff){return((-Math.cos(p*Math.PI)/2)+0.5)*diff+firstNum;}},timers:[],timerId:null,fx:function(elem,options,prop){this.options=options;this.elem=elem;this.prop=prop;if(!options.orig)options.orig={};}});jQuery.fx.prototype={update:function(){if(this.options.step)this.options.step.call(this.elem,this.now,this);(jQuery.fx.step[this.prop]||jQuery.fx.step._default)(this);if(this.prop=="height"||this.prop=="width")this.elem.style.display="block";},cur:function(force){if(this.elem[this.prop]!=null&&this.elem.style[this.prop]==null)return this.elem[this.prop];var r=parseFloat(jQuery.css(this.elem,this.prop,force));return r&&r>-10000?r:parseFloat(jQuery.curCSS(this.elem,this.prop))||0;},custom:function(from,to,unit){this.startTime=now();this.start=from;this.end=to;this.unit=unit||this.unit||"px";this.now=this.start;this.pos=this.state=0;this.update();var self=this;function t(gotoEnd){return self.step(gotoEnd);}t.elem=this.elem;jQuery.timers.push(t);if(jQuery.timerId==null){jQuery.timerId=setInterval(function(){var timers=jQuery.timers;for(var i=0;ithis.options.duration+this.startTime){this.now=this.end;this.pos=this.state=1;this.update();this.options.curAnim[this.prop]=true;var done=true;for(var i in this.options.curAnim)if(this.options.curAnim[i]!==true)done=false;if(done){if(this.options.display!=null){this.elem.style.overflow=this.options.overflow;this.elem.style.display=this.options.display;if(jQuery.css(this.elem,"display")=="none")this.elem.style.display="block";}if(this.options.hide)this.elem.style.display="none";if(this.options.hide||this.options.show)for(var p in this.options.curAnim)jQuery.attr(this.elem.style,p,this.options.orig[p]);}if(done)this.options.complete.call(this.elem);return false;}else{var n=t-this.startTime;this.state=n/this.options.duration;this.pos=jQuery.easing[this.options.easing||(jQuery.easing.swing?"swing":"linear")](this.state,n,0,1,this.options.duration);this.now=this.start+((this.end-this.start)*this.pos);this.update();}return true;}};jQuery.extend(jQuery.fx,{speeds:{slow:600,fast:200,def:400},step:{scrollLeft:function(fx){fx.elem.scrollLeft=fx.now;},scrollTop:function(fx){fx.elem.scrollTop=fx.now;},opacity:function(fx){jQuery.attr(fx.elem.style,"opacity",fx.now);},_default:function(fx){fx.elem.style[fx.prop]=fx.now+fx.unit;}}});jQuery.fn.offset=function(){var left=0,top=0,elem=this[0],results;if(elem)with(jQuery.browser){var parent=elem.parentNode,offsetChild=elem,offsetParent=elem.offsetParent,doc=elem.ownerDocument,safari2=safari&&parseInt(version)<522&&!/adobeair/i.test(userAgent),css=jQuery.curCSS,fixed=css(elem,"position")=="fixed";if(elem.getBoundingClientRect){var 
box=elem.getBoundingClientRect();add(box.left+Math.max(doc.documentElement.scrollLeft,doc.body.scrollLeft),box.top+Math.max(doc.documentElement.scrollTop,doc.body.scrollTop));add(-doc.documentElement.clientLeft,-doc.documentElement.clientTop);}else{add(elem.offsetLeft,elem.offsetTop);while(offsetParent){add(offsetParent.offsetLeft,offsetParent.offsetTop);if(mozilla&&!/^t(able|d|h)$/i.test(offsetParent.tagName)||safari&&!safari2)border(offsetParent);if(!fixed&&css(offsetParent,"position")=="fixed")fixed=true;offsetChild=/^body$/i.test(offsetParent.tagName)?offsetChild:offsetParent;offsetParent=offsetParent.offsetParent;}while(parent&&parent.tagName&&!/^body|html$/i.test(parent.tagName)){if(!/^inline|table.*$/i.test(css(parent,"display")))add(-parent.scrollLeft,-parent.scrollTop);if(mozilla&&css(parent,"overflow")!="visible")border(parent);parent=parent.parentNode;}if((safari2&&(fixed||css(offsetChild,"position")=="absolute"))||(mozilla&&css(offsetChild,"position")!="absolute"))add(-doc.body.offsetLeft,-doc.body.offsetTop);if(fixed)add(Math.max(doc.documentElement.scrollLeft,doc.body.scrollLeft),Math.max(doc.documentElement.scrollTop,doc.body.scrollTop));}results={top:top,left:left};}function border(elem){add(jQuery.curCSS(elem,"borderLeftWidth",true),jQuery.curCSS(elem,"borderTopWidth",true));}function add(l,t){left+=parseInt(l,10)||0;top+=parseInt(t,10)||0;}return results;};jQuery.fn.extend({position:function(){var left=0,top=0,results;if(this[0]){var offsetParent=this.offsetParent(),offset=this.offset(),parentOffset=/^body|html$/i.test(offsetParent[0].tagName)?{top:0,left:0}:offsetParent.offset();offset.top-=num(this,'marginTop');offset.left-=num(this,'marginLeft');parentOffset.top+=num(offsetParent,'borderTopWidth');parentOffset.left+=num(offsetParent,'borderLeftWidth');results={top:offset.top-parentOffset.top,left:offset.left-parentOffset.left};}return results;},offsetParent:function(){var offsetParent=this[0].offsetParent;while(offsetParent&&(!/^body|html$/i.test(offsetParent.tagName)&&jQuery.css(offsetParent,'position')=='static'))offsetParent=offsetParent.offsetParent;return jQuery(offsetParent);}});jQuery.each(['Left','Top'],function(i,name){var method='scroll'+name;jQuery.fn[method]=function(val){if(!this[0])return;return val!=undefined?this.each(function(){this==window||this==document?window.scrollTo(!i?val:jQuery(window).scrollLeft(),i?val:jQuery(window).scrollTop()):this[method]=val;}):this[0]==window||this[0]==document?self[i?'pageYOffset':'pageXOffset']||jQuery.boxModel&&document.documentElement[method]||document.body[method]:this[0][method];};});jQuery.each(["Height","Width"],function(i,name){var tl=i?"Left":"Top",br=i?"Right":"Bottom";jQuery.fn["inner"+name]=function(){return this[name.toLowerCase()]()+num(this,"padding"+tl)+num(this,"padding"+br);};jQuery.fn["outer"+name]=function(margin){return this["inner"+name]()+num(this,"border"+tl+"Width")+num(this,"border"+br+"Width")+(margin?num(this,"margin"+tl)+num(this,"margin"+br):0);};});})(); \ No newline at end of file diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main/layout.css b/DJAGEN/tags/djagen_old/djagen/templates/main/layout.css new file mode 100755 index 0000000..f93cc40 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/templates/main/layout.css @@ -0,0 +1,115 @@ +/* body */ + +body { + margin: 0px; + background-color: white; + color: black; +} + +/* header */ + +#banner { + background: url('images/hdr-planet.png') no-repeat; + background-color: #9db8d2; + background-position: right; + border-bottom: 1px solid 
#807d74; + height: 48px; +} + +#logo { + position: absolute; + top: 25px; + left: 15px; + background: url('images/logo.png') no-repeat; + width: 64px; + height: 54px; +} + +#logo images { + border: 0px; + width: 64px; + height: 64px; +} + +#hdrNav { + margin-top: 6px; + margin-left: 84px; + margin-right: 190px; + padding-right: 3em; + font-size: small; +} + +#hdrNav a { + color: #000000; +} + +#body { + margin: 0 190px 0 0; + padding: 1.5em 3em 0em 1em; +} + +#body *:first-child { + margin-top: 0; +} + +#copyright { + clear: both; + padding-bottom: 1em; + text-align: center; + font-size: small; + color: #aaaaaa; +} + +#copyright a { + color: #c0c0c0; +} + +#copyright a:visited { + color: #c0c0c0; +} + + +/* SIDEBAR */ + +#sidebar { + position: absolute; + top: 80px; + right: 0px; + /*width: 210px;*/ + border-left: 1px solid #ffffff; + background-color: #eeeeee; +} + +#sidebar div.section { + width: 190px; + padding: 1em; + border-top: 1px solid #ffffff; + border-bottom: 1px solid #d9d9d9; +} + +#sidebar div.section h3 { + font-weight: bold; + font-size: 110%; +} + +#sidebar *:first-child { + margin-top: 0; +} + +#sidebar *:last-child { + margin-bottom: 0; +} + +#sidebar div.section ul { + padding: 0; + list-style-type: none; +} + +#sidebar div.section ul ul { + padding-left: 1.5em; + list-style-type: square; +} + +#sidebar div.section p { + font-size: small; +} diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main/main.html b/DJAGEN/tags/djagen_old/djagen/templates/main/main.html new file mode 100755 index 0000000..2f23a10 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/templates/main/main.html @@ -0,0 +1,298 @@ + + + + Linux Gezegeni + + + + + + + + + + + + + + + +
    + + +
    + + +
    + + {% for entry in entries_list|slice:items_per_page %} + {% autoescape off %} + + {% ifequal entry.entry_id.is_approved 1 %} + + {% ifchanged %}
    {{ entry.date|date:"d F Y" }}
    {% endifchanged %} + + +
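For reference, the entry loop above leans on context variables that this diff never defines (entries_list, items_per_page, truncate_words, and the entry.entry_id.is_approved flag). A minimal sketch of the view and model shape those names imply, using hypothetical class and field names rather than the actual djagen code:

    # Hypothetical sketch only: the real djagen models/views are outside this
    # hunk; the names below are inferred from the template variables.
    from django.db import models
    from django.shortcuts import render_to_response

    class Author(models.Model):              # assumed: carries the moderation flag
        is_approved = models.SmallIntegerField(default=1)

    class Entry(models.Model):               # assumed shape of a planet entry
        entry_id = models.ForeignKey(Author) # template reads entry.entry_id.is_approved
        title = models.CharField(max_length=255)
        date = models.DateTimeField()
        content_html = models.TextField()

    def main(request):
        # The slice filter takes a string argument, so items_per_page must be
        # something like ":25"; truncatewords_html takes an integer word count.
        return render_to_response('main/main.html', {
            'entries_list': Entry.objects.order_by('-date'),
            'items_per_page': ':25',
            'truncate_words': 250,
        })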
    + + +
    +
    +
    +
    + {{ entry.title }} +
    +
    +
    +
    + + + {{ entry.content_html|truncatewords_html:truncate_words }} + +
    +
    +
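The post body above is cut with truncatewords_html rather than plain truncatewords; the HTML-aware variant re-closes any tags left open at the cut point, which matters here because content_html is raw feed markup. A quick illustration (standalone, assuming a configured Django environment, not code from this repo):

    # truncatewords_html keeps markup balanced after the cut; the exact
    # ellipsis formatting varies slightly across Django versions.
    from django.template.defaultfilters import truncatewords_html

    html = '<p>Yeni bir <b>Linux Gezegeni girdisi burada</b></p>'
    truncatewords_html(html, 3)
    # -> something like: '<p>Yeni bir <b>Linux ...</b></p>'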
    + + + + + + + + + +
    +
    + +
    +
    +
    + {% endifequal %} + + {% endautoescape %} + + {% endfor %} + + + + + + + + + + + + + + diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main/main.tmpl b/DJAGEN/tags/djagen_old/djagen/templates/main/main.tmpl new file mode 100755 index 0000000..2f23a10 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/templates/main/main.tmpl @@ -0,0 +1,298 @@ + + + + Linux Gezegeni + + + + + + + + + + + + + + + +
    + + +
    + + +
    + + {% for entry in entries_list|slice:items_per_page %} + {% autoescape off %} + + {% ifequal entry.entry_id.is_approved 1 %} + + {% ifchanged %}
    {{ entry.date|date:"d F Y" }}
    {% endifchanged %} + + +
    + + +
    +
    +
    +
    + {{ entry.title }} +
    +
    +
    +
    + + + {{ entry.content_html|truncatewords_html:truncate_words }} + +
    +
    +
    + + + + + + + + + +
    +
    + +
    +
    +
    + {% endifequal %} + + {% endautoescape %} + + {% endfor %} + + + + + + + + + + + + + + diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main/nlayout.css b/DJAGEN/tags/djagen_old/djagen/templates/main/nlayout.css new file mode 100755 index 0000000..72be5ec --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/templates/main/nlayout.css @@ -0,0 +1,316 @@ +body { + margin: 0px; + padding: 0px; + font-family: sans-serif; + background-color: white; + color: black; +} + +/* GEZEGEN strip */ + +#gezegen-sites { + list-style: none; + background: #2E3436 url(img/sites-bg.png) 0 100% repeat-x; + text-align: right; + padding: 0 1ex; + margin: 0; + font-size: 75%; +} + +#gezegen-sites ul { + margin: 0; + padding: 0; +} + +#gezegen-sites li { + display: inline; + background: url(img/sites-sp.png) 0 0 no-repeat; + padding-top: 10px; + padding-bottom: 8px; + margin-left: 0px; + margin-top: 0px; +} + +#gezegen-sites li a { + font-weight: bold; + color: #FFFFFF; + margin: 0 2ex; + text-decoration: none; + line-height: 30px; +} + +#gezegen-sites li a:hover { + text-decoration: underline; +} + +#gezegen-sites .home { + float: left; + background: url(img/sites-sp.png) 100% 0 no-repeat; + padding-top: 0; + padding-bottom: 0; +} + +#gezegen-sites .home a { + float: left; + margin-left: 0; + padding-left: 27px; +} + +/* Site header and masthead */ + +#header { + position: relative; + width: 100%; + background-color: #729FCF; +} + +#masthead { + display: table; + /* req for ie */ + border-top: 1px solid #729FCF; +} + +#site-logo { + vertical-align: middle; + display: table-cell; + float: left; + border: 0; + padding: 10px; + /* req for ie */ + margin-top: expression((this.parentElement.height - this.height)/2); +} + +#site-title { + vertical-align: middle; + display: table-cell; + /* req for ie */ + margin-top: expression((this.parentElement.height - this.height)/2); +} + +#site-name { + margin: 0; +} + +#site-name a { + font-size: xx-large; + font-weight: bold; + text-decoration: none; + color: black; +} + +#site-slogan { + font-size: 80%; + font-style: italic; + margin: 0; +} + +#footer-link { + position: absolute; + right: 1em; + bottom: 1em; + margin: 0; + font-size: 80%; + color: black; + text-decoration: none; + background: url(img/help-about.png) left no-repeat; + padding-left: 20px; +} +#footer-link:hover { text-decoration: underline; } + +div.breadcrumb { + font-size: 75%; +} + +/* Search form */ + +#search { + position: relative; + float: right; + top: 1em; + right: 1em; +} + +#search input.form-text, #search input[name="q"] { + border: 1px solid #888888; + padding: 0.5ex; + background-position: center !important; +} + +#search input.form-submit, #search input[name="sa"] { + background: white url(img/search-icon.gif) no-repeat; + padding: 1px 1px 1px 15px; + border: 1px solid #888888; + display: none; +} + +/* Tabs */ +#site-tabs { + position: absolute; + right: 0px; + bottom: 0px; + width: 100%; + background: transparent url(img/bar.png) 0 100% repeat-x; + margin: 0; + padding: 0; +} + +#site-tabs ul { + float: right; + list-style: none; + margin: 0; + margin-right: 3ex; + font-size: 75%; + clear: none; +} + +#site-tabs ul li { + float: left; + margin: 0; + margin-left: 0.2ex; +} + +#site-tabs ul li a:hover { + color: #111111; +} + +#site-tabs ul li a { + float: left; + text-decoration: none; + color: #555555; + background: #eeeeee; + padding: 7px 7px 7px 7px; + border-bottom: 2px solid #CCCCCC; +} + +#site-tabs ul li a.active { + color: #3566A5; + background: white; + border-top: 2px solid 
#5555ff; + border-bottom: 2px solid white; +} + +/* Content */ +#content { + margin: 0px auto 0px auto; + padding: 0px 1em 0px 1em; + max-width: 65em; +} + +#content h1.title { + margin: 0; +} + +/* Feeds & Footer */ +#feeds { + background: #dcdcdc url(img/feeds-bg.png) repeat-x left top; + padding: 0.5em 0px 0.5em 0px; +} +#feeds h3 { + margin: 0px; + padding: 0px 3% 0px 3%; + font-size: 100%; +} +#feeds h3 a { + background: transparent url(img/dt-closed.png) no-repeat left top; + padding-left: 20px; + margin-left: -20px; + color: #000; + text-decoration: none; +} +#feeds h3.open a { + background: transparent url(img/dt-open.png) no-repeat left top; +} +#feedlist { + display: none; + margin: 0.5em 1em 0.5em 1em; + background-color: #eee; + -moz-border-radius: 1em; + padding: 1em; + column-count: 1; + column-gap: 1em; + -moz-column-count: 1; + -moz-column-gap: 1em; + -webkit-column-count: 1; + -webkit-column-gap: 1em; +} +#feedlist ul { + margin: 0px; + padding: 0px; + list-style-type: none; + font-size: 90%; +} +#feedlist ul li * { + vertical-align: middle; +} +#feedlist ul li input { + margin: 0.2em; +} +#feedlist ul li a { + color: #000; + text-decoration: none; +} +#feedlist ul li a:hover { + text-decoration: underline; +} +#feedlist ul li a.message { + color: #999; +} +#feedlist ul li a img { + margin: 0px 0.2em; + border: 0px; +} + +#footer { + background: black url(img/footer-bg.png) repeat-x left top; + padding: 1%; + font-size: x-small; + color: #ccc; + overflow: hidden; + line-height: 150%; +} + +#footer a { + color: #000000; + font-weight: bold; + text-decoration: none; +} +#footer a:hover { + text-decoration: underline; +} + +#footer .column { + float: left; + width: 20%; + margin-right: 3%; +} + +#footer .section { + margin-bottom: 1em; +} + +#footer .section h3 { + margin: 0; + font-size: 140%; +} + +#footer .section a img { + border: 1px solid #cccccc; +} + +#footer .section ul { + list-style: none; + margin-left: 0; + padding-left: 0; +} + +#fineprint { + display: inline; + float: right; + text-align: right; + width: 25%; +} + +#ownership { + margin-top: 2em; + font-size: 90%; +} diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main/opml.xml b/DJAGEN/tags/djagen_old/djagen/templates/main/opml.xml new file mode 100755 index 0000000..6fea0e5 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/templates/main/opml.xml @@ -0,0 +1,15 @@ + + + + Linux Gezegeni + Paz, 11 Nis 2010 23:16:31 +0000 + Paz, 11 Nis 2010 23:16:31 +0000 + Gezegen Ekibi + gezegen@linux.org.tr + + + + + + + diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main/planet.css b/DJAGEN/tags/djagen_old/djagen/templates/main/planet.css new file mode 100755 index 0000000..16af408 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/templates/main/planet.css @@ -0,0 +1,134 @@ +div.entry { + clear: both; + margin-bottom: 2em; + margin-right: 1em; +} + +.post-contents img { padding: 10px; } + +div.person-info { + float: left; + position: relative; + left: 4px; + margin-top: 25px!important; + padding: 0 20px 30px 0; + width: 120px; + background: url(images/bubble/bubble-nipple.png) top right no-repeat; + text-align: center; +} + +div.person-info a { + text-decoration: none; + color: #666; +} + +div.post { + background: #fff url(images/bubble/bubble-left.png) left repeat-y; + margin-left: 140px; +} + +div.post2 { + background: url(images/bubble/bubble-right.png) right repeat-y; +} + +div.post-contents { + padding: 0 25px 0 25px; + margin-right: 10px; +} + +div.post-contents p { + line-height: 140%; + margin-top: 
1em!important; +} + +div.post-contents blockquote { + color: #666; + line-height: 150%; +} + +div.post-contents:after { + content: ""; + display: block; + clear: both; +} + +h4.post-title, div.post-title { + background: url(images/bubble/bubble-top-left.png) top left no-repeat; + margin: 1em 0 0 0; +} + +h4.post-title a, div.post-title span { + display: block; + background: url(images/bubble/bubble-top-right.png) top right no-repeat; + padding: 22px 25px 0 25px; + font-weight: normal; + font-size: 140%; + text-decoration: none; +} + +h4.post-title a:hover { + text-decoration: underline; +} + +div.post-title span { + display: block; + height: 20px; + font-size: 90%; +} + +div.post-title { + display: block; +} + +div.post-header { + background: url(images/bubble/bubble-top.png) top repeat-x; +} + + +div.post-footer { + background: url(images/bubble/bubble-bottom.png) bottom repeat-x; +} + +div.post-footer p { + background: url(images/bubble/bubble-bottom-left.png) bottom left no-repeat; + margin: 0; +} + +div.post-footer p a { + display: block; + background: url(images/bubble/bubble-bottom-right.png) bottom right no-repeat; + padding: 15px 20px 20px 25px; + text-align: right; + font-size: 85%; + color: #999; + text-decoration: none; +} + +div.post-footer p a:hover { + color: inherit; + text-decoration: underline; +} + +h2.date { + color: #666; + font-weight: normal; + font-size: 130%; + padding-left: 9px; +} + +#sidebar ul li { + font-size: small; +} + +#sidebar ul li a { + text-decoration: none; +} + +#sidebar ul li a:hover { + text-decoration: underline; +} + +#sidebar .message { + cursor: help; + color: #666; +} diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main/rss10.xml b/DJAGEN/tags/djagen_old/djagen/templates/main/rss10.xml new file mode 100755 index 0000000..5435da8 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/templates/main/rss10.xml @@ -0,0 +1,223 @@ + + + + Linux Gezegeni + http://gezegen.linux.org.tr + Linux Gezegeni - http://gezegen.linux.org.tr + + + + + + + + + + + + + + + + + + + + + Oğuz Yarımtepe: Yakın Doğu’da Seminer Rüzgarları + http://feedproxy.google.com/~r/oguzy-gezegen/~3/dmDtp8fRToI/ + <p>Geçen haftadan beri Yakın Doğu Üniversitesi&#8217;nde Linux ve Özgür Yazılım seminerleri düzenliyoruz. İlk seminerimiz Linux Nedir? idi. Uzun zamandan beri dinlediğin en eğlenceli Linux Nedir&#8217;lerden birisi idi. 3.30 saat sürmesine rağmen konuşmacı Ali Erdinç Köroğlu&#8217;nun eğlenceli anlatımı ile zaman su gibi aktı denebilir.</p> +<p>Yakın Doğu&#8217;ya ilk geldiğim zamanlarda ben de bir Linux Nedir semineri vermiştim. O zamanki katılımcı durumu işte aşağıdaki gibi idi.</p> +<p><a href="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0697.jpg"><img class="alignnone size-medium wp-image-99" title="Linux Nedir Semineri" src="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0697-300x199.jpg" alt="" width="300" height="199" /></a></p> +<p>Aslında çoğunluğunu öğretmenlerin oluşturması gereken katılımcı kitlesini, öğrenciler oluşturuyor idi. 1.30 saatlik bir anlatım ile katılımcılara Linux Nedir anlatmıştım. Seminer süresince hiç soru gelmemişti. 
Seminerden sonra yanıma gelip soru soranlar da olunca, epey mutlu olmuştum.</p> +<p><a href="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0704.jpg"><img class="alignnone size-medium wp-image-100" title="Linux Nedir Seminer" src="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0704-300x199.jpg" alt="" width="300" height="199" /></a></p> +<p>Şimdiki durumda katılımcı sayısı azımsanmayacak kadar olması yanında, daha ilgili bir kalabalığın katıldığını düşünüyorum.</p> +<p><a href="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0757.jpg"><img class="alignnone size-medium wp-image-101" title="YDU AEK Internet'in Yapı Taşları Semineri" src="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0757-300x199.jpg" alt="" width="300" height="199" /></a></p> +<p>Ali Erdinc&#8217;in de epey bir eğlenceli anlatımı olduğunu, dinleyicinin dikkatini çekmek için onları arada dürttüğünü de belirtmek lazım.</p> +<p><a href="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0759.jpg"><img class="alignnone size-medium wp-image-102" title="Internet'in Yapı Taşları" src="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0759-300x199.jpg" alt="" width="300" height="199" /></a></p> +<p>Bu seminerler dizisi Mayıs ayına kadar devam edecek. Meraklısı için üniversite <a href="http://duyuru.neu.edu.tr">duyuru sayfası</a>, <a href="http://www.facebook.com/NearEastUniversity">Facebook</a> ve <a href="http://twitter.com/NearEastUniv">Twitter</a>&#8216;dan takip edebileceklerini söyleyelim. Hatta Kıbrıs&#8217;ta olanlar için üniversitede her Cuma akşamları ve Cumartesi günleri sinema gösterimleri yaptığımızı da belirtelim. Biz izlerken eğleniyoruz. Bekleriz.</p> +<p>Lefkoşa&#8217;ya bahar geldi denebilir. Oğuz Yarımtepe Kıbrıs&#8217;tan bildirdi.</p> +<p><a href="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0770.jpg"><img class="alignnone size-medium wp-image-103" title="Inovasyon Merkezi, tarla" src="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0770-300x199.jpg" alt="" width="300" height="199" /></a></p> +<img src="http://feeds.feedburner.com/~r/oguzy-gezegen/~4/dmDtp8fRToI" height="1" width="1" /> + Sal, 16 Mar 2010 17:40:18 +0000 + + + Oğuz Yarımtepe: 100 ml + http://feedproxy.google.com/~r/oguzy-gezegen/~3/nubepmpaYEk/ + <p>1 Ocak 2010 tarihinden itibaren uçuşlarda sıvı kısıtlaması uygulanıyor. El bagajlarında 100 mlyi geçen sıvılara, jel, krem vb. el konuyor. Yılbaşında Kıbrıs&#8217;a dönerken başıma bir olay gelince ne menem bir şeydir diye araştırdığım bir konu oldu. Sırt çantamda 150 ml Dove krem var idi. Kremi epeydir kullanıyordum. Saat 5.30 gibi uçağa binmek için son kontrol noktasında bekliyorduk. Kontrol için güvenlik gelebilirsiniz dedikten sonra ben de  çantayı cihaza bırakıp geçtim. Cihazın başındaki görevli bu çantada sıvı bir şey var diye seslendi bana. Ön gözde dedi. Evet krem var dedim. Açtı. Baktı 150 ml bir krem. 100 ml üzerini alamıyoruz dedi. Ben de uyku sersemi, o kullanılmış bir kutu, içindeki belki 100 mlnin altındadır dedim. Görevli gülümsedi. O da görevini yapıyordu. Ayakta duran bayan sert bir şekilde kutuyu aldı, baktı, bizim için bunu hacmi önemli dedi. Açıkcası tartışmanın bir anlamı yoktu. Onlar da bir kuralı uyguluyorlardı. Elimle söylendiği gibi para verip aldığım kremi çöpe attım.</p> +<p>Şimdi olayın benim açımdan garip noktalarına gelelim</p> +<p>* Güvenlik görevlisi kutunun kaç ml olduğuna üzerine bakarak karar verdi. 
Yani bir dahaki sefere sırf sistemi denemek için, aynı kremden alıp, üzerindeki 150 yi 100 yaparsam geçer muhtemelen.</p> +<p>* Görevli içine açıp bakmadı bile. Bana sordu ne var diye. Yani içine şu epeydir ortalarda olmayan domuz gribi koysam dahi bilgisi olamazdı.</p> +<p>* Elimle çöpe attım, o çok koydu.</p> +<p>Ben de bunun üzerine Ulaştırma Bakanlığı&#8217;na bir eposta attım. Epostam Sivil Havacılık Dairesine yönlendirilmiş ve bir yanıt geldi. Kısaca, bu 100 ml uygulaması 10 Ağustos 2006&#8242;da İngiltere&#8217;de ortaya çıkan terörist plan sonrasında sivil havacılık gündemine girmiş. 6 Kasım 2006&#8242;da çıkarılan 1546/2006 tüzüğü ile tüm AB üyesi ülkelerde ve İsviçre, İzlanda ve Norveç&#8217;te, ABD ve Kanada&#8217;da uygulanmaya başlamış. Türkiye de ECAC üyesi bir devlet olduğundan tavsiye kararına uyarak bu uygulamayı diğer devletlerdeki gibi aynı kurallar dahilinde uygulamaya başlamış. Neden 100 ml peki? Birleşmiş Milletler bünyesindeki Patlayıcılar Çalışma Grubunda yapılan araştırma, testler ve risk değerlendirmesi neticesinde sıvıların 100 ml lik kaplarda 1 litreklik poşette taşınması halinde (1 lt&#8217;lik poşete yaklaşık 6 adet kap sığmaktaymış) uçuşun emniyetini tehlikeye düşürmeyeceği sonucuna varılmış. Bilim adamları araştırmış bulmuş, bir şey diyemeyecem bu konuda. Peki bizde 100 ml olduğu nasıl anlaşılıyor? Baya, ya size soruyorlar ya da üzerindeki yazıları okuyorlar. Yani ben teroristlik yapmak istesem, alırım 200 ml sıvı patlayıcı, koyarım Dove krem kutusuna, yazıları bir güzel 100 diye düzenlerim, sorarlarsa da 100 der geçerim. Peki biz neden illa 100 diye uyguluyoruz? E çünkü diğer ülkelerde de öyle de ondan. Epostadaki şu satırlara bakalım:</p> +<p>&#8220;Ülkemiz yukarıda adı geçen uluslarası kuruluşların aldığı kararları  ve berlilediği standartları uygulamakla yükümlüdür.&#8221;</p> +<p>Bu konudaki uygulama diğer ülkelerde hangi standartlarda uygulanıyor bilmiyorum. Belki de sadece Kıbrıs uçuşlarında bir acayiplik vardır. Standart denilen kavram sadece 100 sayısına bağlı bir şeydir diye de anlaşılıyor olabilir.</p> +<p>Siz siz olun, uçağa binerken el bagajınızda üzerinde 100 ml üzeri bir şey yazan herhangi bir kap bulundurmayın. İçi boş dolu farketmez.</p> +<img src="http://feeds.feedburner.com/~r/oguzy-gezegen/~4/nubepmpaYEk" height="1" width="1" /> + Cum, 05 Şub 2010 12:19:21 +0000 + + + Hakan Uygun: Artık Sun yok! + http://www.hakanuygun.com/blog/?p=432 + <p>iP<a href="http://www.hakanuygun.com/blog/wp-content/uploads/2010/01/sunoracle.gif"><img class="alignleft size-full wp-image-434" title="sunoracle" src="http://www.hakanuygun.com/blog/wp-content/uploads/2010/01/sunoracle.gif" alt="sunoracle" width="202" height="122" /></a>ad haberleri arasında kaybolup gidiyor ama Oracle uzun süren Sun&#8217;ı satın alma işlemini bitirdi. Artık <a href="http://www.sun.com" target="_blank">www.sun.com</a> adresine girdiğinizde sizi doğrudan Oracle sitesine yönlendiriyor.</p> +<p>Beni en çok ilgilendiren konular ise Sun&#8217;ın özgür yazılım projelerine devam edilip edilmeyeceği konularında ise şimdilik olumlu haberler geliyor. 
Bütün bu projeler içerisinde devam edilmeyeceği açıklanan tek proje şimdilik Kenai.</p> +<p>Umarım hepimiz için mutlu son olur&#8230;</p> +<p><strong>Ek</strong>: <a href="http://www.kulturmantari.org/" target="_blank">Kültür Mantarı</a>&#8216;nın yönlendirmesi ile <a href="http://blogs.sun.com/jag/entry/so_long_old_friend" target="_blank">James Gosling&#8217;</a>in bu konu ile ilgili blogunu gördüm ve ordaki görselin de burada saklanmasının iyi olacağını düşünüp buraya kopyaladım&#8230;</p> +<p><a href="http://www.hakanuygun.com/blog/wp-content/uploads/2010/01/sunrip.jpg"><img class="aligncenter size-medium wp-image-445" title="sunrip" src="http://www.hakanuygun.com/blog/wp-content/uploads/2010/01/sunrip-300x234.jpg" alt="sunrip" width="300" height="234" /></a></p> + Cum, 29 Oca 2010 09:28:25 +0000 + + + Hakan Uygun: EMO 13. Ulusal Kongresi + http://www.hakanuygun.com/blog/?p=381 + <p>EMO&#8217;nun 23-26 Aralıkta ODTÜ de gerçekleşecek olan <a href="http://www.ulusalkongre.org" target="_blank">13. Ulusal Kongre</a>si kapsamında 25 Aralık Cuma günü 9:30-11:15 arasında Özgür Yazılım başlılklı özel oturumda &#8220;Özgür Yazılımların Uygulama Geliştirme Modeline Etkisi; Tekir’den Öğrendiklerimiz&#8221; ve 11.30-12.30 arasında da &#8220;Özgür Yazılımın Ekonomik ve Sosyal Yönleri” sunumlarını yapıyorum.</p> +<p>Genel olarak yüklü bir programı olan bu etkinlikte çeşitli <a href="http://haber.linux.org.tr/2009/12/23-26-aralik-emo-ulusal-kongre-ankara-linux-seminerleri/" target="_blank">LKD seminerleri</a> de olacak. Buyrunuz geliniz!</p> + Prş, 24 Ara 2009 15:45:26 +0000 + + + Hakan Uygun: Intel, Atom, Moblin + http://www.hakanuygun.com/blog/?p=338 + <p>Intel Atom işlemcileri ile hayatın her yerinde yer alamak istiyor. x86 tabanlı Atom işlemciler programcılara normal bilgisayarlar için yazılmış uygulamalarını çok fazla değişikliğe uğratmaya gerek kalmadan mobil cihazlarda çalıştırabilmesine olanak sağlıyor. Bu da Intel&#8217;e önemli avantajlar sağlıyor. Bu avantajını daha da arttırmak için cihazar üzerinde performansı arttıracak işletim sistemi için de kolları sıvayıp Moblin&#8217;i geliştirmeye başlamışlardı. Dün bu konular üzerine Intel&#8217;den üç önemli açıklama oldu&#8230;</p> +<p>Atom işlemcili cihazlarda uygulama performansını arttırmak için yeni bir geliştirici programı başlattılar. <a href="http://appdeveloper.intel.com/en-us/">Atom Developer Program</a>&#8216;ı teşvik etmek içinde bir yarışma başlattılar. Bence bir göz atmakta fayda var&#8230; ( Ben kayıt olacağım <img src="http://www.hakanuygun.com/blog/wp-includes/images/smilies/icon_smile.gif" alt=":)" class="wp-smiley" /> )</p> +<p>İkinci ve üçüncü açıklamalar ise bir arada geldi, Moblin&#8217;in yeni sürümü 2.1 yayınlandı ve Atom işlemcili bir <a href="http://www.engadget.com/2009/09/22/intel-announces-moblin-2-1-for-phones/#continued">akıllı telefon</a> üzerinde sunuldu. Intel bir çırpıda bir dolu firmaya rakip oldu <img src="http://www.hakanuygun.com/blog/wp-includes/images/smilies/icon_smile.gif" alt=":)" class="wp-smiley" /> Geçenlerde de <a href="http://www.hakanuygun.com/blog/?p=279">yazmıştım</a>,  önümüzdeki yıl içerisinde mobil dünyada bir dolu ilginç gelişmeler bekliyorum. 
Umarım bu rekabetten özgür yazılım ve biz kullanıcılar kazançlı çıkarız&#8230;</p> + Prş, 24 Eyl 2009 09:00:51 +0000 + + + Hakan Uygun: Teknik Destek Kopya Kağıtı + http://www.hakanuygun.com/blog/?p=330 + <p>xkcd&#8217;de geçen gün yayınlanan <a href="http://xkcd.com/627/">bu</a> teknik destek kopya kağıdını pek beğendim ve Türkçe bir sürümünü yapayım dedim.</p> +<p><img class="aligncenter size-full wp-image-331" title="teknikdestek" src="http://www.hakanuygun.com/blog/wp-content/uploads/2009/08/teknikdestek.png" alt="teknikdestek" width="468" height="461" /><br /> +İsteyenler için ODF hali de <a href="http://www.hakanuygun.com/blog/wp-content/uploads/2009/08/teknikdestek.odg">burada</a></p> + Sal, 25 Ağu 2009 07:28:26 +0000 + + + Hakan Uygun: Korsan Değil “Fikir Hırsızı” + http://www.hakanuygun.com/blog/?p=312 + <p>Kültür ve Turizm Bakanlığı, Fikir ve Sanat Eserleri Kanunu&#8217;nda değişiklik yaparak İnternet üzerinden müzik, film, kitap ve benzeri şeyleri indirenlerin de ceza almasını sağlamak için çalışma <a href="http://www.ntv.com.tr/id/24992251/" target="_blank">başlatmış</a>. Bu denetimi yapabilmek için de internet servis sağlayıcıları ile birlikte çalışacaklarmış.</p> +<p>Her köşe başında kurulan tezgahlarda kitap, cd, dvd satan arkadaşlar hiçbir sorun ile karşılaşmadan bu işi yaparken, bunun için bildiğim kadarıyla yasal düzenlemeler zaten var, onlarla mücadele etmek yerine, internetten akan trafiği denetleyecekler. Bu denetim sırasında da müzik mi yoksa sevgilinizden gelen e-postanızı mı indirdiğiniz fark etmeyecek, dinleyecekler. Ayrıca indirdiğiniz müziğin yasal mı yoksa yasa dışı mı olduğunu bir çırpıda anlayacaklar. Bu arada, haberden eğer yanlış okumadıysam,  yapılan operasyonda makinenizde çıkan parça sayısı kadar da görevlilere prim verilecek <img src="http://www.hakanuygun.com/blog/wp-includes/images/smilies/icon_smile.gif" alt=":)" class="wp-smiley" /> Yani büyük birader izlemeye ve istediğinde yasaklamaya olanak sağlayacak yeni yollar arıyor&#8230;</p> +<p>Bütün bunlardan fikir haklarına saygı duymadığım anlaşılmasın; tam tersine korsana, fikir hırsızlığına kesinlikle karşıyım. Fakat bütün bunlar bahane edilerek kişisel iletişimin ihlal edilmesine daha çok karşıyım.</p> +<p>Son olarak bir haber daha verelim: Pirate Bay&#8217;in 23 GB&#8217;lik arşivi de <a href="http://thepiratebay.org/torrent/5053827" target="_blank">paylaşıma</a> açılmış. Bu arşiv içerisinde yasal olmayan şeyler olabilir ama yasal olarak paylaşılan da birçok eser var. Sizler yasal olanlarını indirin <img src="http://www.hakanuygun.com/blog/wp-includes/images/smilies/icon_smile.gif" alt=":)" class="wp-smiley" /> Korsan değil özgür yazılım kullanın!</p> + Sal, 18 Ağu 2009 08:07:07 +0000 + + + Hakan Uygun: Mobil Cihazlar Dünyasında Neler Oluyor? + http://www.hakanuygun.com/blog/?p=279 + <p><img class="aligncenter size-full wp-image-282" title="moblin" src="http://www.hakanuygun.com/blog/wp-content/uploads/2009/07/moblin.jpg" alt="moblin" width="280" height="151" />Bir süredir mobil cihazlar dünyası hareketlenmiş durumda. Apple iPhone ile birlikte mobil telefon dünyasında ciddi bir hareketlenme olmuştu. Palm, Nokia, Sony-Ericsson, BlackBerry gibi sektörün önde gelen firmalarına Apple ciddi bir rakip olarak ortaya çıkmış ardından da Google Android ile bir platform olarak henüz yeterince destekleyen donanım olmasa bile vadettikleri ile dikkatleri üzerine çekmişti. 
<a href="http://en.wikipedia.org/wiki/Android_os" target="_blank">Android</a>, <a href="http://en.wikipedia.org/wiki/WebOS" target="_blank">WebOS</a> ve <a href="http://en.wikipedia.org/wiki/IPhone_OS" target="_blank">iPhone OS</a>&#8216;a  karşı <a href="http://en.wikipedia.org/wiki/Symbian_OS" target="_blank">Symbian</a>&#8216;ı savunmaya devam eden Nokia, elinde olmayan hisselerini de alarak,  bir vakıf kurup Symbiyan&#8217;ı açık kaynak kodlu olarak  bu vakfa devretmişti.</p> +<p>Tam da bu esnada Intel Atom işlemcisi ile düşük kaynak kullanan PC&#8217;lerin geliştirilmesine olanak sağladı ve NetBook&#8217;lar geçtiğimiz yıl içinde popüler cihazlar arasına girdiler.</p> +<p>Bu yıl ise Intel, Mobile Internet Device ( MID - Mobil Internet Aracı ) üzerine ciddi yatırımlar yapmaya başladı. Hatta bu cihazların cazibesini arttırmak için özel bir linux dağıtımına bile başladı : <a href="http://en.wikipedia.org/wiki/Moblin" target="_blank">Moblin</a>.</p> +<p>Moblin&#8217;e destek konusunda Intel önce Canonical ile anlaşmıştı. Daha sonra Canonical NetBook dağıtımı olarak Nokia&#8217;nın kendi tabletlerinde kullanmak amacıyla ürettiği <a href="http://en.wikipedia.org/wiki/Maemo_%28operating_system%29" target="_blank">Maemo</a>&#8216;yu desteklemeye karar verdiğini açıkladı. Intel&#8217;de Moblin&#8217;i Linux Vakfı&#8217;na devrettiğini ve destek konusunda da Novell&#8217;le anlaştığını ilan etti. İki hafta önce detayları belirtilmeyen bir Nokia - Intel anlaşması ilan edildi. Genel olarak yorumlanan ise  Nokia&#8217;nın daha becerikli telefonlar üretmek için Intel teknolojilerini kullanacağı bu arada da Moblin ile Maemo arasında bir seçim yapıp güçlerini birleştirecekleri yönündeydi. Bugün Nokia, Android temelli telefonlar üretmeyeceğini ve GTK+ temelli olan Maemo&#8217;yu Qt&#8217;ye taşıyacağını ilan etti.</p> +<p>İşte benim sorularımın temelini de burası oluşturuyor. Qt temelli bir Maemo&#8217;yu Canonical desteklemeye devam edecek mi? Nokia Intel işlemcili MID&#8217;ler üretip bunlarda Mameo&#8217;mu koşturacak yoksa Intel işlemcili telefonlar üretip Symbian&#8217;ı rakipleri kadar becerikli yepyeni bir hale mi dönüştürecek? Intel MID&#8217;ler konusunda neler planlıyor? Bu planları içerisinde Moblin&#8217;i desteklemeye devam etmek var mı yoksa Nokia ile birlikte Maemo&#8217;ya yatırım mı yapacaklar? NetBook&#8217;larda da kullanılmaya başlayan Android de bu üretilecek donanımlar için bir alternatif olacaklar mı?</p> +<p>Hepsinden önemlisi bütün bunların sonucunda ortaya çıkacak olan, biz tüketiciler için ucuz ve becerikli donanımlar mı yoksa; bir biri ile uyumsuz bir dolu daha oyuncak mı?</p> + Sal, 07 Tem 2009 11:04:23 +0000 + + + Hakan Uygun: LKD Genel Kurulu için Ankara’ya + http://www.hakanuygun.com/blog/?p=259 + <p>Bu hafta sonu gerçekleştirilecek LKD Genel Kurulu için Ankara&#8217;ya gidiyoruz. Aşağıdaki yazı bu yolculuk için organizasyon yapmaya çalışan  Volkan&#8217;dan&#8230;</p> +<p>***</p> +<p>Ankara yerine Bağdata gitsem bu kadar koştururdum herhalde,</p> +<p>TCDD : en teknolojik YHT çalıştıran, 5 saaat 28 dk Ankaraya ulaştıran koskoca<br /> +kurum.<br /> +Evet bu kurum malesef bilet satmak istemiyor.</p> +<p>1- web sitesi windows ve Internet explorer bağımlısı. Öncelikle böyle bir<br /> +sisteme sahip olmanız gerekli. (MAC ve Linux kullanıcıları tren yolcuları<br /> +portföyünde yer almıyor. 
Onlar uçak veya otobüs severler!)</p> +<p>2- web sitesindeki bilet satış uygulamasında banka sıra makinelerinin bir<br /> +türevi sadece. Sıradan boş koltuk veriyor. Pulman vagonlarında ilk 6 koltuk<br /> +karşılıklı bakar durumda, son 3 koltukta geriye yatamaz durumda. Bilin<br /> +bakalım verdiği ilk koltuklar nereleri ? Evet bildiniz bunlar. Farklı bir<br /> +koltuk veya vagon seçemiyorsunuz. Seçilebilecek şeyler bayan yanı ve<br /> +internet. Onların da ne kadar gerçek seçimlere izin verildiği şüpheli.<br /> +(İnternet olsun dedim, sonuç yok dedi.)</p> +<p>3- PTT şubeleri tren bilet satacak acenteler olarak duyuruluyor. Gidiyorsunuz,<br /> +veee&#8230; Evet, biz satıyoruz, ama siteye girebilirsek diyorlar. Ne oldu, tabii<br /> +ki giremediler. 10 dk sıra beklediğiniz için teşekkür ederiz.</p> +<p>4- Acente, komisyon karşılığı TCDD bileti satanlar. Gidiyoruz birine, bize<br /> +bilet lazım satabiliyor musunuz? diye soruyorum. Tabii buyrun diyorlar. Gidiş<br /> +dönüş 1 tam 1 öğrenci istiyorum. Satıcı önce<br /> +- G/D kesmiyorum diyor buradan.!<br /> +- Nasıl yani?<br /> +- Fark yok zaten, ayrı ayrı keseyim. Fiyatı farklı mı ki?<br /> +Başka bir arkadaşı düzeltiyor, aynı express olursa kesebilirsin.<br /> +- Elbette G/D niye alayım indirim var diyorum.<br /> +Neyse girip deniyor, gelen koltuk numaralarını soruyorum.<br /> +- 4,5 diyor. (İlk altı koltuk içinden boş olanlar)<br /> +- Değiştiremiyor musunuz?<br /> +- Maalesef.<br /> +- Internet sürümüne mi giriyorsunuz diyorum ister istemez.<br /> +- Hayır biz acente olarak giriyoruz ama fark yok. cevabı geliyor. (Tahminen<br /> +üzerine ek komisyon ekleniyor sadece.)<br /> +- Kim koltuk seçtiriyor bana ?<br /> +- Gardan alabilirsiniz, Haydarpaşa veya Sirkeci.</p> +<p>5- Rotamız Sirkeci garı. Bir otobüs ve tramvay ile ulaşıyorum.<br /> +Bende dil yandı ya, ilk soru Fatih expresine bilet istiyorum, ama koltuk<br /> +seçebiliyor musunuz?<br /> +- Bakalım yer boş olursa seçebiliriz diyor satıcı bu kez.<br /> +- Ohh nihayet.<br /> +- 1 tam 1 öğrenci G/D istiyorum, artı 1 öğrenci sadece gidiş.<br /> +- Öğrencide G/D farkı yok cevabı geliyor.<br /> +- Biliyorum, tam da var onun için söylüyorum. (Bilgi: Tam bileti G/D alırsanız<br /> +öğrenci bileti ile aynı fiyat, garip. G/D alacaksanız öğrenciliğiniz işe<br /> +yaramıyor. Yani pasoya gerek yok. Tespit: Öğrenciler hep tek yön seyahat<br /> +eder.)<br /> +- Kredi kartı mı, peşin mi?<br /> +- DIINN ! kredi kartı.. var dimi?<br /> +- Evet, 112 TL<br /> +- Buyrun, zııttt pıırtt iki tak tak bi laklak biletler ve pos slipi elimde.</p> +<p>Gişenin önünden ayrılmadan biletleri tren, tarih, yer vs. doğru mu diye<br /> +kontrol ediyorum. Elimde biletler teşekkür edip ayrılırken, 1,5 saatte ancak<br /> +bir alış veriş yapmış oluyorum.  Daha bir de geri dönüş yolu var.</p> +<p>Velhasıl,<br /> +Gidiş : 18/06/2009 Perşembe 23:30 Haydarpaşa Vagon:X Koltuk: XX-XX-XX<br /> +Gidiş : 20/06/2009 Cumartesi 23:30 Ankara Vagon:X Koltuk: XX-XX</p> +<p>Hayırlı yolculuklar.</p> +<p>=====================<br /> +Dipnot-1: Bu yerleri aldığım 1. vagon haricinde 2 vagon tamamen boş görünüyor<br /> +daha. 
2-3 nolarda satılan yerler var.</p> +<p>Dipnot-2: Ben telefonla iş yapmaya alışamamış biri olarak, rezervasyon veya<br /> +satış işlemi var mı diye hiç peşine düşmedim. Orada da farklı bir macera sizi<br /> +bekliyor olabilir, kimbilir?</p> +<p>Dipnot-3: Yataklı vagonlarda alt-üst yatak seçme şansınız olabilir mi sizce?</p> + Çrş, 17 Haz 2009 21:33:17 +0000 + + + Hakan Uygun: IE, WTW ve Gıda Yardımı + http://www.hakanuygun.com/blog/?p=248 + <p><a href="http://walktheweb.wfp.org/" target="_blank"><img class="aligncenter size-full wp-image-252" title="wfp-wtw" src="http://www.hakanuygun.com/blog/wp-content/uploads/2009/06/wfp-wtw.png" alt="wfp-wtw" width="512" height="240" /></a>Bugünlerde dünya üzerindeki açlık, gıda yardımları ve bunlara ait haberler her zamankinden daha fazla ilgimi çekiyor. Dolayısıyla Microsoft&#8217;un yeni kampanyası ilgimi çekti. Microsoft İnternet Tarayıcısının yeni sürümünü daha iyi duyurabilmek için gıda yardımı üzerine kurulu bir kampanya başlatmış. IE8&#8242;in her tam indirilmesine karşılık 8 öğün yemek bağışında bulunacakmış. Detaylara <a href="http://www.browserforthebetter.com/download.html" target="_blank">buradan</a> ulaşabilirsiniz&#8230;</p> +<p>Bu konu ile ilgili de bir dolu tartışma gündeme geldi tabii ki, örneğin <a href="http://www.techcrunch.com/" target="_blank">TechCrunch</a>&#8216;da kampanyaya dair bir dolu yazı ve tartışma var. Ben kendi adıma Linux üzerinde zaten çalışmayan bu tarayıcıyı indirip biraz ağ zamanı harcayıp bağışta bulunsam mı, zaten IE kullananların hatalı çalışan eski sürümler yerine CSS ve JS ile ilgili bir dolu hatanın düzeltildiği bu yeni sürüme geçmelerini teşvik etsem mi, yoksa hiç sesimi çıkarmasam mı bilemedim. Ardından da bu haberi bahane edip daha fazlası ile yazayım dedim.</p> +<p>İster IE8 indirin isterseniz aşağıdaki organizasyonların sitelerini ziyaret edip dünya üzerindeki açlık ve fakirlikle mücadeleye katkıda bulunmak için yapabileceklerinizi öğrenin&#8230; Bunların içerisinde özellikle Birleşmiş Milletler Dünya Gıda Programı&#8217;nın <a href="http://walktheweb.wfp.org/" target="_blank">Walk The Web</a> kampanyasına bir göz atmanızı öneririm&#8230;</p> +<ul> +<li><a href="http://www.wfp.org/" target="_blank">www.wfp.org</a></li> +<li><a href="http://www.actionagainsthunger.org/" target="_blank">www.actionagainsthunger.org</a></li> +<li><a href="http://www.hakanuygun.com/blog/www.makepovertyhistory.org" target="_blank">www.makepovertyhistory.org</a></li> +<li><a href="http://www.standagainstpoverty.org" target="_blank">www.standagainstpoverty.org</a></li> +<li><a href="http://www.engineersagainstpoverty.org" target="_blank">www.engineersagainstpoverty.org</a></li> +<li><a href="http://www.whiteband.org" target="_blank">www.whiteband.org</a></li> +</ul> +<p>Son olarak da bugünlerde herkese önerdiğim gibi <a href="http://www.facebook.com/ext/share.php?sid=107634228486&h=FwnnE&u=6crnv&ref=mf" target="_blank">Yuva</a> ( Home ) belgeselini izlemenizi şiddetle tavsiye ederim.</p> + Sal, 16 Haz 2009 11:38:02 +0000 + + + Hakan Uygun: TBD Bilişim Kongresi’nde Özgür Yazılım Paneli + http://www.hakanuygun.com/blog/?p=244 + <p>TBD&#8217;nin bu yıl 3.sünü düzenlediği <a href="http://www.istanbulbilisimkongresi.org.tr/" target="_blank">İstanbul Bilişim Kongresi</a>&#8216;nde Pazar günü saat 14:00&#8242;de Özgür Yazılım Paneli olacaktır. 
Panel&#8217;de özgür yazılım ve iş modelleri üzerinde durulacaktır. İlgilenenlere duyurulur&#8230;</p> +<p><strong> Yer:</strong> Marmara Üniversitesi Nişantaşı Kampüsü<br /> +Erdal İnönü Bilim ve Kültür Merkezi<br /> +<strong>Tarih:</strong> 31 Mayıs Pazar, 14:00 - 15:20<br /> +<strong>Oturum başkanı:</strong> Görkem Çetin<br /> +<strong>Konuşmacılar:</strong> Enver Altın, Hakan Uygun, Cahit Cengizhan</p> + Prş, 28 May 2009 16:22:08 +0000 + + + Hakan Uygun: Sıralama Algoritmaları + http://www.hakanuygun.com/blog/?p=231 + <p>Sıralama algoritmaları, programcılığa girişi oluşturan en temel şeylerdendir. Özellikle aynı problemin çözümü için farklı yöntemlerin nasıl farklı sonuçlar verdiğini görmek için şahane örneklerdir. Daha da iyisi bu farklı algoritmaların görsel olarak karşılaştırmasıdır. İşte tam da bu işi başarıyla yapan bu <a href="http://www.sorting-algorithms.com/" target="_blank">siteye</a> bakmanızı şiddetle tavsiye ederim. Sadece farklı algoritmaları görsel karşılaştırmasını değil, her algoritmanın farklı veri kümelerinde davranış biçimini ve detaylı karşılaştırmalarını da bulabilirsiniz&#8230;</p> + Pzt, 13 Nis 2009 08:20:53 +0000 + + + diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main/rss20.xml b/DJAGEN/tags/djagen_old/djagen/templates/main/rss20.xml new file mode 100755 index 0000000..90ecbe4 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/templates/main/rss20.xml @@ -0,0 +1,228 @@ + + + + + Linux Gezegeni + http://gezegen.linux.org.tr + en + Linux Gezegeni - http://gezegen.linux.org.tr + + + Oğuz Yarımtepe: Yakın Doğu’da Seminer Rüzgarları + http://www.loopbacking.info/blog/?p=98 + http://feedproxy.google.com/~r/oguzy-gezegen/~3/dmDtp8fRToI/ + + ]]> +<p>Geçen haftadan beri Yakın Doğu Üniversitesi&#8217;nde Linux ve Özgür Yazılım seminerleri düzenliyoruz. İlk seminerimiz Linux Nedir? idi. Uzun zamandan beri dinlediğim en eğlenceli Linux Nedir&#8217;lerden birisi idi. 3.30 saat sürmesine rağmen konuşmacı Ali Erdinç Köroğlu&#8217;nun eğlenceli anlatımı ile zaman su gibi aktı denebilir.</p> +<p>Yakın Doğu&#8217;ya ilk geldiğim zamanlarda ben de bir Linux Nedir semineri vermiştim. O zamanki katılımcı durumu işte aşağıdaki gibi idi.</p> +<p><a href="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0697.jpg"><img class="alignnone size-medium wp-image-99" title="Linux Nedir Semineri" src="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0697-300x199.jpg" alt="" width="300" height="199" /></a></p> +<p>Aslında çoğunluğunu öğretmenlerin oluşturması gereken katılımcı kitlesini, öğrenciler oluşturuyor idi. 1.30 saatlik bir anlatım ile katılımcılara Linux Nedir anlatmıştım. Seminer süresince hiç soru gelmemişti. 
Seminerden sonra yanıma gelip soru soranlar da olunca, epey mutlu olmuştum.</p> +<p><a href="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0704.jpg"><img class="alignnone size-medium wp-image-100" title="Linux Nedir Seminer" src="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0704-300x199.jpg" alt="" width="300" height="199" /></a></p> +<p>Şimdiki durumda katılımcı sayısının azımsanmayacak kadar çok olması yanında, daha ilgili bir kalabalığın katıldığını düşünüyorum.</p> +<p><a href="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0757.jpg"><img class="alignnone size-medium wp-image-101" title="YDU AEK Internet'in Yapı Taşları Semineri" src="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0757-300x199.jpg" alt="" width="300" height="199" /></a></p> +<p>Ali Erdinç&#8217;in de epey bir eğlenceli anlatımı olduğunu, dinleyicinin dikkatini çekmek için onları arada dürttüğünü de belirtmek lazım.</p> +<p><a href="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0759.jpg"><img class="alignnone size-medium wp-image-102" title="Internet'in Yapı Taşları" src="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0759-300x199.jpg" alt="" width="300" height="199" /></a></p> +<p>Bu seminerler dizisi Mayıs ayına kadar devam edecek. Meraklısı için üniversite <a href="http://duyuru.neu.edu.tr">duyuru sayfası</a>, <a href="http://www.facebook.com/NearEastUniversity">Facebook</a> ve <a href="http://twitter.com/NearEastUniv">Twitter</a>&#8216;dan takip edebileceklerini söyleyelim. Hatta Kıbrıs&#8217;ta olanlar için üniversitede her Cuma akşamları ve Cumartesi günleri sinema gösterimleri yaptığımızı da belirtelim. Biz izlerken eğleniyoruz. Bekleriz.</p> +<p>Lefkoşa&#8217;ya bahar geldi denebilir. Oğuz Yarımtepe Kıbrıs&#8217;tan bildirdi.</p> +<p><a href="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0770.jpg"><img class="alignnone size-medium wp-image-103" title="Inovasyon Merkezi, tarla" src="http://www.loopbacking.info/blog/wp-content/uploads/2010/03/DSC_0770-300x199.jpg" alt="" width="300" height="199" /></a></p> + Sal, 16 Mar 2010 17:40:18 +0000 + + + Oğuz Yarımtepe: 100 ml + http://www.loopbacking.info/blog/?p=95 + http://feedproxy.google.com/~r/oguzy-gezegen/~3/nubepmpaYEk/ + + ]]> +<p>1 Ocak 2010 tarihinden itibaren uçuşlarda sıvı kısıtlaması uygulanıyor. El bagajlarında 100 mlyi geçen sıvılara, jel, krem vb. el konuyor. Yılbaşında Kıbrıs&#8217;a dönerken başıma bir olay gelince ne menem bir şeydir diye araştırdığım bir konu oldu. Sırt çantamda 150 ml Dove krem var idi. Kremi epeydir kullanıyordum. Saat 5.30 gibi uçağa binmek için son kontrol noktasında bekliyorduk. Kontrol için güvenlik gelebilirsiniz dedikten sonra ben de çantayı cihaza bırakıp geçtim. Cihazın başındaki görevli bu çantada sıvı bir şey var diye seslendi bana. Ön gözde dedi. Evet krem var dedim. Açtı. Baktı 150 ml bir krem. 100 ml üzerini alamıyoruz dedi. Ben de uyku sersemi, o kullanılmış bir kutu, içindeki belki 100 mlnin altındadır dedim. Görevli gülümsedi. O da görevini yapıyordu. Ayakta duran bayan sert bir şekilde kutuyu aldı, baktı, bizim için bunun hacmi önemli dedi. Açıkçası tartışmanın bir anlamı yoktu. Onlar da bir kuralı uyguluyorlardı. 
Elimle söylendiği gibi para verip aldığım kremi çöpe attım.</p> +<p>Şimdi olayın benim açımdan garip noktalarına gelelim.</p> +<p>* Güvenlik görevlisi kutunun kaç ml olduğuna üzerine bakarak karar verdi. Yani bir dahaki sefere sırf sistemi denemek için, aynı kremden alıp, üzerindeki 150 yi 100 yaparsam geçer muhtemelen.</p> +<p>* Görevli içine açıp bakmadı bile. Bana sordu ne var diye. Yani içine şu epeydir ortalarda olmayan domuz gribi koysam dahi bilgisi olamazdı.</p> +<p>* Elimle çöpe attım, o çok koydu.</p> +<p>Ben de bunun üzerine Ulaştırma Bakanlığı&#8217;na bir eposta attım. Epostam Sivil Havacılık Dairesine yönlendirilmiş ve bir yanıt geldi. Kısaca, bu 100 ml uygulaması 10 Ağustos 2006&#8242;da İngiltere&#8217;de ortaya çıkan terörist plan sonrasında sivil havacılık gündemine girmiş. 6 Kasım 2006&#8242;da çıkarılan 1546/2006 tüzüğü ile tüm AB üyesi ülkelerde ve İsviçre, İzlanda ve Norveç&#8217;te, ABD ve Kanada&#8217;da uygulanmaya başlamış. Türkiye de ECAC üyesi bir devlet olduğundan tavsiye kararına uyarak bu uygulamayı diğer devletlerdeki gibi aynı kurallar dahilinde uygulamaya başlamış. Neden 100 ml peki? Birleşmiş Milletler bünyesindeki Patlayıcılar Çalışma Grubunda yapılan araştırma, testler ve risk değerlendirmesi neticesinde sıvıların 100 ml lik kaplarda 1 litrelik poşette taşınması halinde (1 lt&#8217;lik poşete yaklaşık 6 adet kap sığmaktaymış) uçuşun emniyetini tehlikeye düşürmeyeceği sonucuna varılmış. Bilim adamları araştırmış bulmuş, bir şey diyemeyecem bu konuda. Peki bizde 100 ml olduğu nasıl anlaşılıyor? Baya, ya size soruyorlar ya da üzerindeki yazıları okuyorlar. Yani ben teröristlik yapmak istesem, alırım 200 ml sıvı patlayıcı, koyarım Dove krem kutusuna, yazıları bir güzel 100 diye düzenlerim, sorarlarsa da 100 der geçerim. Peki biz neden illa 100 diye uyguluyoruz? E çünkü diğer ülkelerde de öyle de ondan. Epostadaki şu satırlara bakalım:</p> +<p>&#8220;Ülkemiz yukarıda adı geçen uluslararası kuruluşların aldığı kararları ve belirlediği standartları uygulamakla yükümlüdür.&#8221;</p> +<p>Bu konudaki uygulama diğer ülkelerde hangi standartlarda uygulanıyor bilmiyorum. Belki de sadece Kıbrıs uçuşlarında bir acayiplik vardır. Standart denilen kavram sadece 100 sayısına bağlı bir şeydir diye de anlaşılıyor olabilir.</p> +<p>Siz siz olun, uçağa binerken el bagajınızda üzerinde 100 ml üzeri bir şey yazan herhangi bir kap bulundurmayın. İçi boş dolu farketmez.</p> + Cum, 05 Şub 2010 12:19:21 +0000 + + + Hakan Uygun: Artık Sun yok! + http://www.hakanuygun.com/blog/?p=432 + http://www.hakanuygun.com/blog/?p=432 + +<p>iP<a href="http://www.hakanuygun.com/blog/wp-content/uploads/2010/01/sunoracle.gif"><img class="alignleft size-full wp-image-434" title="sunoracle" src="http://www.hakanuygun.com/blog/wp-content/uploads/2010/01/sunoracle.gif" alt="sunoracle" width="202" height="122" /></a>ad haberleri arasında kaybolup gidiyor ama Oracle uzun süren Sun&#8217;ı satın alma işlemini bitirdi. Artık <a href="http://www.sun.com" target="_blank">www.sun.com</a> adresine girdiğinizde sizi doğrudan Oracle sitesine yönlendiriyor.</p> +<p>Beni en çok ilgilendiren konu ise Sun&#8217;ın özgür yazılım projelerine devam edilip edilmeyeceği; bu konuda şimdilik olumlu haberler geliyor. 
Bütün bu projeler içerisinde devam edilmeyeceği açıklanan tek proje şimdilik Kenai.</p> +<p>Umarım hepimiz için mutlu son olur&#8230;</p> +<p><strong>Ek</strong>: <a href="http://www.kulturmantari.org/" target="_blank">Kültür Mantarı</a>&#8216;nın yönlendirmesi ile <a href="http://blogs.sun.com/jag/entry/so_long_old_friend" target="_blank">James Gosling&#8217;</a>in bu konu ile ilgili blogunu gördüm ve oradaki görselin de burada saklanmasının iyi olacağını düşünüp buraya kopyaladım&#8230;</p> +<p><a href="http://www.hakanuygun.com/blog/wp-content/uploads/2010/01/sunrip.jpg"><img class="aligncenter size-medium wp-image-445" title="sunrip" src="http://www.hakanuygun.com/blog/wp-content/uploads/2010/01/sunrip-300x234.jpg" alt="sunrip" width="300" height="234" /></a></p> + Cum, 29 Oca 2010 09:28:25 +0000 + + + Hakan Uygun: EMO 13. Ulusal Kongresi + http://www.hakanuygun.com/blog/?p=381 + http://www.hakanuygun.com/blog/?p=381 + +<p>EMO&#8217;nun 23-26 Aralık&#8217;ta ODTÜ&#8217;de gerçekleşecek olan <a href="http://www.ulusalkongre.org" target="_blank">13. Ulusal Kongre</a>si kapsamında 25 Aralık Cuma günü 9:30-11:15 arasında Özgür Yazılım başlıklı özel oturumda &#8220;Özgür Yazılımların Uygulama Geliştirme Modeline Etkisi; Tekir’den Öğrendiklerimiz&#8221; ve 11.30-12.30 arasında da &#8220;Özgür Yazılımın Ekonomik ve Sosyal Yönleri” sunumlarını yapıyorum.</p> +<p>Genel olarak yüklü bir programı olan bu etkinlikte çeşitli <a href="http://haber.linux.org.tr/2009/12/23-26-aralik-emo-ulusal-kongre-ankara-linux-seminerleri/" target="_blank">LKD seminerleri</a> de olacak. Buyrunuz geliniz!</p> + Prş, 24 Ara 2009 15:45:26 +0000 + + + Hakan Uygun: Intel, Atom, Moblin + http://www.hakanuygun.com/blog/?p=338 + http://www.hakanuygun.com/blog/?p=338 + +<p>Intel Atom işlemcileri ile hayatın her yerinde yer almak istiyor. x86 tabanlı Atom işlemciler programcılara normal bilgisayarlar için yazılmış uygulamalarını çok fazla değişikliğe uğratmaya gerek kalmadan mobil cihazlarda çalıştırabilmelerine olanak sağlıyor. Bu da Intel&#8217;e önemli avantajlar sağlıyor. Bu avantajını daha da arttırmak için cihazlar üzerinde performansı arttıracak işletim sistemi için de kolları sıvayıp Moblin&#8217;i geliştirmeye başlamışlardı. Dün bu konular üzerine Intel&#8217;den üç önemli açıklama oldu&#8230;</p> +<p>Atom işlemcili cihazlarda uygulama performansını arttırmak için yeni bir geliştirici programı başlattılar. <a href="http://appdeveloper.intel.com/en-us/">Atom Developer Program</a>&#8216;ı teşvik etmek için de bir yarışma başlattılar. Bence bir göz atmakta fayda var&#8230; ( Ben kayıt olacağım <img src="http://www.hakanuygun.com/blog/wp-includes/images/smilies/icon_smile.gif" alt=":)" class="wp-smiley" /> )</p> +<p>İkinci ve üçüncü açıklamalar ise bir arada geldi, Moblin&#8217;in yeni sürümü 2.1 yayınlandı ve Atom işlemcili bir <a href="http://www.engadget.com/2009/09/22/intel-announces-moblin-2-1-for-phones/#continued">akıllı telefon</a> üzerinde sunuldu. Intel bir çırpıda bir dolu firmaya rakip oldu <img src="http://www.hakanuygun.com/blog/wp-includes/images/smilies/icon_smile.gif" alt=":)" class="wp-smiley" /> Geçenlerde de <a href="http://www.hakanuygun.com/blog/?p=279">yazmıştım</a>,  önümüzdeki yıl içerisinde mobil dünyada bir dolu ilginç gelişmeler bekliyorum. 
Umarım bu rekabetten özgür yazılım ve biz kullanıcılar kazançlı çıkarız&#8230;</p> + Prş, 24 Eyl 2009 09:00:51 +0000 + + + Hakan Uygun: Teknik Destek Kopya Kağıtı + http://www.hakanuygun.com/blog/?p=330 + http://www.hakanuygun.com/blog/?p=330 + +<p>xkcd&#8217;de geçen gün yayınlanan <a href="http://xkcd.com/627/">bu</a> teknik destek kopya kağıdını pek beğendim ve Türkçe bir sürümünü yapayım dedim.</p> +<p><img class="aligncenter size-full wp-image-331" title="teknikdestek" src="http://www.hakanuygun.com/blog/wp-content/uploads/2009/08/teknikdestek.png" alt="teknikdestek" width="468" height="461" /><br /> +İsteyenler için ODF hali de <a href="http://www.hakanuygun.com/blog/wp-content/uploads/2009/08/teknikdestek.odg">burada</a></p> + Sal, 25 Ağu 2009 07:28:26 +0000 + + + Hakan Uygun: Korsan Değil “Fikir Hırsızı” + http://www.hakanuygun.com/blog/?p=312 + http://www.hakanuygun.com/blog/?p=312 + +<p>Kültür ve Turizm Bakanlığı, Fikir ve Sanat Eserleri Kanunu&#8217;nda değişiklik yaparak İnternet üzerinden müzik, film, kitap ve benzeri şeyleri indirenlerin de ceza almasını sağlamak için çalışma <a href="http://www.ntv.com.tr/id/24992251/" target="_blank">başlatmış</a>. Bu denetimi yapabilmek için de internet servis sağlayıcıları ile birlikte çalışacaklarmış.</p> +<p>Her köşe başında kurulan tezgahlarda kitap, cd, dvd satan arkadaşlar hiçbir sorun ile karşılaşmadan bu işi yaparken, bunun için bildiğim kadarıyla yasal düzenlemeler zaten var, onlarla mücadele etmek yerine, internetten akan trafiği denetleyecekler. Bu denetim sırasında da müzik mi yoksa sevgilinizden gelen e-postanızı mı indirdiğiniz fark etmeyecek, dinleyecekler. Ayrıca indirdiğiniz müziğin yasal mı yoksa yasa dışı mı olduğunu bir çırpıda anlayacaklar. Bu arada, haberden eğer yanlış okumadıysam,  yapılan operasyonda makinenizde çıkan parça sayısı kadar da görevlilere prim verilecek <img src="http://www.hakanuygun.com/blog/wp-includes/images/smilies/icon_smile.gif" alt=":)" class="wp-smiley" /> Yani büyük birader izlemeye ve istediğinde yasaklamaya olanak sağlayacak yeni yollar arıyor&#8230;</p> +<p>Bütün bunlardan fikir haklarına saygı duymadığım anlaşılmasın; tam tersine korsana, fikir hırsızlığına kesinlikle karşıyım. Fakat bütün bunlar bahane edilerek kişisel iletişimin ihlal edilmesine daha çok karşıyım.</p> +<p>Son olarak bir haber daha verelim: Pirate Bay&#8217;in 23 GB&#8217;lik arşivi de <a href="http://thepiratebay.org/torrent/5053827" target="_blank">paylaşıma</a> açılmış. Bu arşiv içerisinde yasal olmayan şeyler olabilir ama yasal olarak paylaşılan da birçok eser var. Sizler yasal olanlarını indirin <img src="http://www.hakanuygun.com/blog/wp-includes/images/smilies/icon_smile.gif" alt=":)" class="wp-smiley" /> Korsan değil özgür yazılım kullanın!</p> + Sal, 18 Ağu 2009 08:07:07 +0000 + + + Hakan Uygun: Mobil Cihazlar Dünyasında Neler Oluyor? + http://www.hakanuygun.com/blog/?p=279 + http://www.hakanuygun.com/blog/?p=279 + +<p><img class="aligncenter size-full wp-image-282" title="moblin" src="http://www.hakanuygun.com/blog/wp-content/uploads/2009/07/moblin.jpg" alt="moblin" width="280" height="151" />Bir süredir mobil cihazlar dünyası hareketlenmiş durumda. Apple iPhone ile birlikte mobil telefon dünyasında ciddi bir hareketlenme olmuştu. Palm, Nokia, Sony-Ericsson, BlackBerry gibi sektörün önde gelen firmalarına Apple ciddi bir rakip olarak ortaya çıkmış ardından da Google Android ile bir platform olarak henüz yeterince destekleyen donanım olmasa bile vadettikleri ile dikkatleri üzerine çekmişti. 
<a href="http://en.wikipedia.org/wiki/Android_os" target="_blank">Android</a>, <a href="http://en.wikipedia.org/wiki/WebOS" target="_blank">WebOS</a> ve <a href="http://en.wikipedia.org/wiki/IPhone_OS" target="_blank">iPhone OS</a>&#8216;a  karşı <a href="http://en.wikipedia.org/wiki/Symbian_OS" target="_blank">Symbian</a>&#8216;ı savunmaya devam eden Nokia, elinde olmayan hisselerini de alarak,  bir vakıf kurup Symbiyan&#8217;ı açık kaynak kodlu olarak  bu vakfa devretmişti.</p> +<p>Tam da bu esnada Intel Atom işlemcisi ile düşük kaynak kullanan PC&#8217;lerin geliştirilmesine olanak sağladı ve NetBook&#8217;lar geçtiğimiz yıl içinde popüler cihazlar arasına girdiler.</p> +<p>Bu yıl ise Intel, Mobile Internet Device ( MID - Mobil Internet Aracı ) üzerine ciddi yatırımlar yapmaya başladı. Hatta bu cihazların cazibesini arttırmak için özel bir linux dağıtımına bile başladı : <a href="http://en.wikipedia.org/wiki/Moblin" target="_blank">Moblin</a>.</p> +<p>Moblin&#8217;e destek konusunda Intel önce Canonical ile anlaşmıştı. Daha sonra Canonical NetBook dağıtımı olarak Nokia&#8217;nın kendi tabletlerinde kullanmak amacıyla ürettiği <a href="http://en.wikipedia.org/wiki/Maemo_%28operating_system%29" target="_blank">Maemo</a>&#8216;yu desteklemeye karar verdiğini açıkladı. Intel&#8217;de Moblin&#8217;i Linux Vakfı&#8217;na devrettiğini ve destek konusunda da Novell&#8217;le anlaştığını ilan etti. İki hafta önce detayları belirtilmeyen bir Nokia - Intel anlaşması ilan edildi. Genel olarak yorumlanan ise  Nokia&#8217;nın daha becerikli telefonlar üretmek için Intel teknolojilerini kullanacağı bu arada da Moblin ile Maemo arasında bir seçim yapıp güçlerini birleştirecekleri yönündeydi. Bugün Nokia, Android temelli telefonlar üretmeyeceğini ve GTK+ temelli olan Maemo&#8217;yu Qt&#8217;ye taşıyacağını ilan etti.</p> +<p>İşte benim sorularımın temelini de burası oluşturuyor. Qt temelli bir Maemo&#8217;yu Canonical desteklemeye devam edecek mi? Nokia Intel işlemcili MID&#8217;ler üretip bunlarda Mameo&#8217;mu koşturacak yoksa Intel işlemcili telefonlar üretip Symbian&#8217;ı rakipleri kadar becerikli yepyeni bir hale mi dönüştürecek? Intel MID&#8217;ler konusunda neler planlıyor? Bu planları içerisinde Moblin&#8217;i desteklemeye devam etmek var mı yoksa Nokia ile birlikte Maemo&#8217;ya yatırım mı yapacaklar? NetBook&#8217;larda da kullanılmaya başlayan Android de bu üretilecek donanımlar için bir alternatif olacaklar mı?</p> +<p>Hepsinden önemlisi bütün bunların sonucunda ortaya çıkacak olan, biz tüketiciler için ucuz ve becerikli donanımlar mı yoksa; bir biri ile uyumsuz bir dolu daha oyuncak mı?</p> + Sal, 07 Tem 2009 11:04:23 +0000 + + + Hakan Uygun: LKD Genel Kurulu için Ankara’ya + http://www.hakanuygun.com/blog/?p=259 + http://www.hakanuygun.com/blog/?p=259 + +<p>Bu hafta sonu gerçekleştirilecek LKD Genel Kurulu için Ankara&#8217;ya gidiyoruz. Aşağıdaki yazı bu yolculuk için organizasyon yapmaya çalışan  Volkan&#8217;dan&#8230;</p> +<p>***</p> +<p>Ankara yerine Bağdata gitsem bu kadar koştururdum herhalde,</p> +<p>TCDD : en teknolojik YHT çalıştıran, 5 saaat 28 dk Ankaraya ulaştıran koskoca<br /> +kurum.<br /> +Evet bu kurum malesef bilet satmak istemiyor.</p> +<p>1- web sitesi windows ve Internet explorer bağımlısı. Öncelikle böyle bir<br /> +sisteme sahip olmanız gerekli. (MAC ve Linux kullanıcıları tren yolcuları<br /> +portföyünde yer almıyor. 
Onlar uçak veya otobüs severler!)</p> +<p>2- web sitesindeki bilet satış uygulamasında banka sıra makinelerinin bir<br /> +türevi sadece. Sıradan boş koltuk veriyor. Pulman vagonlarında ilk 6 koltuk<br /> +karşılıklı bakar durumda, son 3 koltukta geriye yatamaz durumda. Bilin<br /> +bakalım verdiği ilk koltuklar nereleri ? Evet bildiniz bunlar. Farklı bir<br /> +koltuk veya vagon seçemiyorsunuz. Seçilebilecek şeyler bayan yanı ve<br /> +internet. Onların da ne kadar gerçek seçimlere izin verildiği şüpheli.<br /> +(İnternet olsun dedim, sonuç yok dedi.)</p> +<p>3- PTT şubeleri tren bilet satacak acenteler olarak duyuruluyor. Gidiyorsunuz,<br /> +veee&#8230; Evet, biz satıyoruz, ama siteye girebilirsek diyorlar. Ne oldu, tabii<br /> +ki giremediler. 10 dk sıra beklediğiniz için teşekkür ederiz.</p> +<p>4- Acente, komisyon karşılığı TCDD bileti satanlar. Gidiyoruz birine, bize<br /> +bilet lazım satabiliyor musunuz? diye soruyorum. Tabii buyrun diyorlar. Gidiş<br /> +dönüş 1 tam 1 öğrenci istiyorum. Satıcı önce<br /> +- G/D kesmiyorum diyor buradan.!<br /> +- Nasıl yani?<br /> +- Fark yok zaten, ayrı ayrı keseyim. Fiyatı farklı mı ki?<br /> +Başka bir arkadaşı düzeltiyor, aynı express olursa kesebilirsin.<br /> +- Elbette G/D niye alayım indirim var diyorum.<br /> +Neyse girip deniyor, gelen koltuk numaralarını soruyorum.<br /> +- 4,5 diyor. (İlk altı koltuk içinden boş olanlar)<br /> +- Değiştiremiyor musunuz?<br /> +- Maalesef.<br /> +- Internet sürümüne mi giriyorsunuz diyorum ister istemez.<br /> +- Hayır biz acente olarak giriyoruz ama fark yok. cevabı geliyor. (Tahminen<br /> +üzerine ek komisyon ekleniyor sadece.)<br /> +- Kim koltuk seçtiriyor bana ?<br /> +- Gardan alabilirsiniz, Haydarpaşa veya Sirkeci.</p> +<p>5- Rotamız Sirkeci garı. Bir otobüs ve tramvay ile ulaşıyorum.<br /> +Bende dil yandı ya, ilk soru Fatih expresine bilet istiyorum, ama koltuk<br /> +seçebiliyor musunuz?<br /> +- Bakalım yer boş olursa seçebiliriz diyor satıcı bu kez.<br /> +- Ohh nihayet.<br /> +- 1 tam 1 öğrenci G/D istiyorum, artı 1 öğrenci sadece gidiş.<br /> +- Öğrencide G/D farkı yok cevabı geliyor.<br /> +- Biliyorum, tam da var onun için söylüyorum. (Bilgi: Tam bileti G/D alırsanız<br /> +öğrenci bileti ile aynı fiyat, garip. G/D alacaksanız öğrenciliğiniz işe<br /> +yaramıyor. Yani pasoya gerek yok. Tespit: Öğrenciler hep tek yön seyahat<br /> +eder.)<br /> +- Kredi kartı mı, peşin mi?<br /> +- DIINN ! kredi kartı.. var dimi?<br /> +- Evet, 112 TL<br /> +- Buyrun, zııttt pıırtt iki tak tak bi laklak biletler ve pos slipi elimde.</p> +<p>Gişenin önünden ayrılmadan biletleri tren, tarih, yer vs. doğru mu diye<br /> +kontrol ediyorum. Elimde biletler teşekkür edip ayrılırken, 1,5 saatte ancak<br /> +bir alış veriş yapmış oluyorum.  Daha bir de geri dönüş yolu var.</p> +<p>Velhasıl,<br /> +Gidiş : 18/06/2009 Perşembe 23:30 Haydarpaşa Vagon:X Koltuk: XX-XX-XX<br /> +Gidiş : 20/06/2009 Cumartesi 23:30 Ankara Vagon:X Koltuk: XX-XX</p> +<p>Hayırlı yolculuklar.</p> +<p>=====================<br /> +Dipnot-1: Bu yerleri aldığım 1. vagon haricinde 2 vagon tamamen boş görünüyor<br /> +daha. 
2-3 nolarda satılan yerler var.</p> +<p>Dipnot-2: Ben telefonla iş yapmaya alışamamış biri olarak, rezervasyon veya<br /> +satış işlemi var mı diye hiç peşine düşmedim. Orada da farklı bir macera sizi<br /> +bekliyor olabilir, kimbilir?</p> +<p>Dipnot-3: Yataklı vagonlarda alt-üst yatak seçme şansınız olabilir mi sizce?</p> + Çrş, 17 Haz 2009 21:33:17 +0000 + + + Hakan Uygun: IE, WTW ve Gıda Yardımı + http://www.hakanuygun.com/blog/?p=248 + http://www.hakanuygun.com/blog/?p=248 + +<p><a href="http://walktheweb.wfp.org/" target="_blank"><img class="aligncenter size-full wp-image-252" title="wfp-wtw" src="http://www.hakanuygun.com/blog/wp-content/uploads/2009/06/wfp-wtw.png" alt="wfp-wtw" width="512" height="240" /></a>Bugünlerde dünya üzerindeki açlık, gıda yardımları ve bunlara ait haberler her zamankinden daha fazla ilgimi çekiyor. Dolayısıyla Microsoft&#8217;un yeni kampanyası ilgimi çekti. Microsoft İnternet Tarayıcısının yeni sürümünü daha iyi duyurabilmek için gıda yardımı üzerine kurulu bir kampanya başlatmış. IE8&#8242;in her tam indirilmesine karşılık 8 öğün yemek bağışında bulunacakmış. Detaylara <a href="http://www.browserforthebetter.com/download.html" target="_blank">buradan</a> ulaşabilirsiniz&#8230;</p> +<p>Bu konu ile ilgili de bir dolu tartışma gündeme geldi tabii ki, örneğin <a href="http://www.techcrunch.com/" target="_blank">TechCrunch</a>&#8216;da kampanyaya dair bir dolu yazı ve tartışma var. Ben kendi adıma Linux üzerinde zaten çalışmayan bu tarayıcıyı indirip biraz ağ zamanı harcayıp bağışta bulunsam mı, zaten IE kullananların hatalı çalışan eski sürümler yerine CSS ve JS ile ilgili bir dolu hatanın düzeltildiği bu yeni sürüme geçmelerini teşvik etsem mi, yoksa hiç sesimi çıkarmasam mı bilemedim. 
Ardından da bu haberi bahane edip daha fazlası ile yazayım dedim.</p> +<p>İster IE8 indirin isterseniz aşağıdaki organizasyonların sitelerini ziyaret edip dünya üzerindeki açlık ve fakirlikle mücadeleye katkıda bulunmak için yapabileceklerinizi öğrenin&#8230; Bunların içerisinde özellikle Birleşmiş Milletler Dünya Gıda Programı&#8217;nın <a href="http://walktheweb.wfp.org/" target="_blank">Walk The Web</a> kampanyasına bir göz atmanızı öneririm&#8230;</p> +<ul> +<li><a href="http://www.wfp.org/" target="_blank">www.wfp.org</a></li> +<li><a href="http://www.actionagainsthunger.org/" target="_blank">www.actionagainsthunger.org</a></li> +<li><a href="http://www.hakanuygun.com/blog/www.makepovertyhistory.org" target="_blank">www.makepovertyhistory.org</a></li> +<li><a href="http://www.standagainstpoverty.org" target="_blank">www.standagainstpoverty.org</a></li> +<li><a href="http://www.engineersagainstpoverty.org" target="_blank">www.engineersagainstpoverty.org</a></li> +<li><a href="http://www.whiteband.org" target="_blank">www.whiteband.org</a></li> +</ul> +<p>Son olarak da bugünlerde herkese önerdiğim gibi <a href="http://www.facebook.com/ext/share.php?sid=107634228486&h=FwnnE&u=6crnv&ref=mf" target="_blank">Yuva</a> ( Home ) belgeselini izlemenizi şiddetle tavsiye ederim.</p> + Sal, 16 Haz 2009 11:38:02 +0000 + + + Hakan Uygun: TBD Bilişim Kongresi’nde Özgür Yazılım Paneli + http://www.hakanuygun.com/blog/?p=244 + http://www.hakanuygun.com/blog/?p=244 + +<p>TBD&#8217;nin bu yıl 3.sünü düzenlediği <a href="http://www.istanbulbilisimkongresi.org.tr/" target="_blank">İstanbul Bilişim Kongresi</a>&#8216;nde Pazar günü saat 14:00&#8242;de Özgür Yazılım Paneli olacaktır. Panel&#8217;de özgür yazılım ve iş modelleri üzerinde durulacaktır. İlgilenenlere duyurulur&#8230;</p> +<p><strong> Yer:</strong> Marmara Üniversitesi Nişantaşı Kampüsü<br /> +Erdal İnönü Bilim ve Kültür Merkezi<br /> +<strong>Tarih:</strong> 31 Mayıs Pazar, 14:00 - 15:20<br /> +<strong>Oturum başkanı:</strong> Görkem Çetin<br /> +<strong>Konuşmacılar:</strong> Enver Altın, Hakan Uygun, Cahit Cengizhan</p> + Prş, 28 May 2009 16:22:08 +0000 + + + Hakan Uygun: Sıralama Algoritmaları + http://www.hakanuygun.com/blog/?p=231 + http://www.hakanuygun.com/blog/?p=231 + +<p>Sıralama algoritmaları, programcılığa girişi oluşturan en temel şeylerdendir. Özellikle aynı problemin çözümü için farklı yöntemlerin nasıl farklı sonuçlar verdiğini görmek için şahane örneklerdir. Daha da iyisi bu farklı algoritmaların görsel olarak karşılaştırmasıdır. İşte tam da bu işi başarıyla yapan bu <a href="http://www.sorting-algorithms.com/" target="_blank">siteye</a> bakmanızı şiddetle tavsiye ederim. Sadece farklı algoritmaları görsel karşılaştırmasını değil, her algoritmanın farklı veri kümelerinde davranış biçimini ve detaylı karşılaştırmalarını da bulabilirsiniz&#8230;</p> + Pzt, 13 Nis 2009 08:20:53 +0000 + + + + diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main/sidebar.html b/DJAGEN/tags/djagen_old/djagen/templates/main/sidebar.html new file mode 100755 index 0000000..8709a87 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/templates/main/sidebar.html @@ -0,0 +1,86 @@ + diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main/simple.html b/DJAGEN/tags/djagen_old/djagen/templates/main/simple.html new file mode 100755 index 0000000..ab9cf73 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/templates/main/simple.html @@ -0,0 +1,439 @@ + + + + + + + Linux Gezegeni + + + + + + + + + + + + + +

    16 Mart 2010

    + +
    + + + +
    +
    +

    Yakın Doğu’da Seminer Rüzgarları

    +
    +
    +

    Geçen haftadan beri Yakın Doğu Üniversitesi’nde Linux ve Özgür Yazılım seminerleri düzenliyoruz. İlk seminerimiz Linux Nedir? idi. Uzun zamandan beri dinlediğim en eğlenceli Linux Nedir’lerden birisi idi. 3.30 saat sürmesine rağmen konuşmacı Ali Erdinç Köroğlu’nun eğlenceli anlatımı ile zaman su gibi aktı denebilir.

    +

    Yakın Doğu’ya ilk geldiğim zamanlarda ben de bir Linux Nedir semineri vermiştim. O zamanki katılımcı durumu işte aşağıdaki gibi idi.

    +

    +

    Aslında çoğunluğunu öğretmenlerin oluşturması gereken katılımcı kitlesini, öğrenciler oluşturuyor idi. 1.30 saatlik bir anlatım ile katılımcılara Linux Nedir anlatmıştım. Seminer süresince hiç soru gelmemişti. Seminerden sonra yanıma gelip soru soranlar da olunca, epey mutlu olmuştum.

    +

    +

    Şimdiki durumda katılımcı sayısının azımsanmayacak kadar çok olması yanında, daha ilgili bir kalabalığın katıldığını düşünüyorum.

    +

    +

    Ali Erdinç’in de epey bir eğlenceli anlatımı olduğunu, dinleyicinin dikkatini çekmek için onları arada dürttüğünü de belirtmek lazım.

    +

    +

    Bu seminerler dizisi Mayıs ayına kadar devam edecek. Meraklısı için üniversite duyuru sayfası, Facebook ve Twitter‘dan takip edebileceklerini söyleyelim. Hatta Kıbrıs’ta olanlar için üniversitede her Cuma akşamları ve Cumartesi günleri sinema gösterimleri yaptığımızı da belirtelim. Biz izlerken eğleniyoruz. Bekleriz.

    +

    Lefkoşa’ya bahar geldi denebilir. Oğuz Yarımtepe Kıbrıs’tan bildirdi.

    +

    +
    + + + +
    + +
    +

    05 Şubat 2010

    + +
    + + + +
    +
    +

    100 ml

    +
    +
    +

    1 Ocak 2010 tarihinden itibaren uçuşlarda sıvı kısıtlaması uygulanıyor. El bagajlarında 100 mlyi geçen sıvılara, jel, krem vb. el konuyor. Yılbaşında Kıbrıs’a dönerken başıma bir olay gelince ne menem bir şeydir diye araştırdığım bir konu oldu. Sırt çantamda 150 ml Dove krem var idi. Kremi epeydir kullanıyordum. Saat 5.30 gibi uçağa binmek için son kontrol noktasında bekliyorduk. Kontrol için güvenlik gelebilirsiniz dedikten sonra ben de çantayı cihaza bırakıp geçtim. Cihazın başındaki görevli bu çantada sıvı bir şey var diye seslendi bana. Ön gözde dedi. Evet krem var dedim. Açtı. Baktı 150 ml bir krem. 100 ml üzerini alamıyoruz dedi. Ben de uyku sersemi, o kullanılmış bir kutu, içindeki belki 100 mlnin altındadır dedim. Görevli gülümsedi. O da görevini yapıyordu. Ayakta duran bayan sert bir şekilde kutuyu aldı, baktı, bizim için bunun hacmi önemli dedi. Açıkçası tartışmanın bir anlamı yoktu. Onlar da bir kuralı uyguluyorlardı. Elimle söylendiği gibi para verip aldığım kremi çöpe attım.

    +

    Şimdi olayın benim açımdan garip noktalarına gelelim.

    +

    * Güvenlik görevlisi kutunun kaç ml olduğuna üzerine bakarak karar verdi. Yani bir dahaki sefere sırf sistemi denemek için, aynı kremden alıp, üzerindeki 150 yi 100 yaparsam geçer muhtemelen.

    +

    * Görevli içine açıp bakmadı bile. Bana sordu ne var diye. Yani içine şu epeydir ortalarda olmayan domuz gribi koysam dahi bilgisi olamazdı.

    +

    * Elimle çöpe attım, o çok koydu.

    +

    Ben de bunun üzerine Ulaştırma Bakanlığı’na bir eposta attım. Epostam Sivil Havacılık Dairesine yönlendirilmiş ve bir yanıt geldi. Kısaca, bu 100 ml uygulaması 10 Ağustos 2006′da İngiltere’de ortaya çıkan terörist plan sonrasında sivil havacılık gündemine girmiş. 6 Kasım 2006′da çıkarılan 1546/2006 tüzüğü ile tüm AB üyesi ülkelerde ve İsviçre, İzlanda ve Norveç’te, ABD ve Kanada’da uygulanmaya başlamış. Türkiye de ECAC üyesi bir devlet olduğundan tavsiye kararına uyarak bu uygulamayı diğer devletlerdeki gibi aynı kurallar dahilinde uygulamaya başlamış. Neden 100 ml peki? Birleşmiş Milletler bünyesindeki Patlayıcılar Çalışma Grubunda yapılan araştırma, testler ve risk değerlendirmesi neticesinde sıvıların 100 ml lik kaplarda 1 litrelik poşette taşınması halinde (1 lt’lik poşete yaklaşık 6 adet kap sığmaktaymış) uçuşun emniyetini tehlikeye düşürmeyeceği sonucuna varılmış. Bilim adamları araştırmış bulmuş, bir şey diyemeyecem bu konuda. Peki bizde 100 ml olduğu nasıl anlaşılıyor? Baya, ya size soruyorlar ya da üzerindeki yazıları okuyorlar. Yani ben teröristlik yapmak istesem, alırım 200 ml sıvı patlayıcı, koyarım Dove krem kutusuna, yazıları bir güzel 100 diye düzenlerim, sorarlarsa da 100 der geçerim. Peki biz neden illa 100 diye uyguluyoruz? E çünkü diğer ülkelerde de öyle de ondan. Epostadaki şu satırlara bakalım:

    +

    “Ülkemiz yukarıda adı geçen uluslararası kuruluşların aldığı kararları ve belirlediği standartları uygulamakla yükümlüdür.”

    +

    Bu konudaki uygulama diğer ülkelerde hangi standartlarda uygulanıyor bilmiyorum. Belki de sadece Kıbrıs uçuşlarında bir acayiplik vardır. Standart denilen kavram sadece 100 sayısına bağlı bir şeydir diye de anlaşılıyor olabilir.

    +

    Siz siz olun, uçağa binerken el bagajınızda üzerinde 100 ml üzeri bir şey yazan herhangi bir kap bulundurmayın. İçi boş dolu farketmez.

    +
    + + + +
    + +
    +

    29 Ocak 2010

    + +
    + + + +
    +
    +

    Artık Sun yok!

    +
    +
    +

    iPad haberleri arasında kaybolup gidiyor ama Oracle uzun süren Sun’ı satın alma işlemini bitirdi. Artık www.sun.com adresine girdiğinizde sizi doğrudan Oracle sitesine yönlendiriyor.

    +

    Beni en çok ilgilendiren konu ise Sun’ın özgür yazılım projelerine devam edilip edilmeyeceği; bu konuda şimdilik olumlu haberler geliyor. Bütün bu projeler içerisinde devam edilmeyeceği açıklanan tek proje şimdilik Kenai.

    +

    Umarım hepimiz için mutlu son olur…

    +

    Ek: Kültür Mantarı‘nın yönlendirmesi ile James Gosling’in bu konu ile ilgili blogunu gördüm ve oradaki görselin de burada saklanmasının iyi olacağını düşünüp buraya kopyaladım…

    +

    sunrip

    + + + +
    + +
    +

    24 Aralık 2009

    + +
    + + + +
    +
    +

    EMO 13. Ulusal Kongresi

    +
    +
    +

    EMO’nun 23-26 Aralık’ta ODTÜ’de gerçekleşecek olan 13. Ulusal Kongresi kapsamında 25 Aralık Cuma günü 9:30-11:15 arasında Özgür Yazılım başlıklı özel oturumda “Özgür Yazılımların Uygulama Geliştirme Modeline Etkisi; Tekir’den Öğrendiklerimiz” ve 11.30-12.30 arasında da “Özgür Yazılımın Ekonomik ve Sosyal Yönleri” sunumlarını yapıyorum.

    +

    Genel olarak yüklü bir programı olan bu etkinlikte çeşitli LKD seminerleri de olacak. Buyrunuz geliniz!

    + + + +
    + +
    +

    24 Eylül 2009

    + +
    + + + +
    +
    +

    Intel, Atom, Moblin

    +
    +
    +

    Intel Atom işlemcileri ile hayatın her yerinde yer almak istiyor. x86 tabanlı Atom işlemciler programcılara normal bilgisayarlar için yazılmış uygulamalarını çok fazla değişikliğe uğratmaya gerek kalmadan mobil cihazlarda çalıştırabilmelerine olanak sağlıyor. Bu da Intel’e önemli avantajlar sağlıyor. Bu avantajını daha da arttırmak için cihazlar üzerinde performansı arttıracak işletim sistemi için de kolları sıvayıp Moblin’i geliştirmeye başlamışlardı. Dün bu konular üzerine Intel’den üç önemli açıklama oldu…

    +

    Atom işlemcili cihazlarda uygulama performansını arttırmak için yeni bir geliştirici programı başlattılar. Atom Developer Program‘ı teşvik etmek için de bir yarışma başlattılar. Bence bir göz atmakta fayda var… ( Ben kayıt olacağım :) )

    +

    İkinci ve üçüncü açıklamalar ise bir arada geldi, Moblin’in yeni sürümü 2.1 yayınlandı ve Atom işlemcili bir akıllı telefon üzerinde sunuldu. Intel bir çırpıda bir dolu firmaya rakip oldu :) Geçenlerde de yazmıştım,  önümüzdeki yıl içerisinde mobil dünyada bir dolu ilginç gelişmeler bekliyorum. Umarım bu rekabetten özgür yazılım ve biz kullanıcılar kazançlı çıkarız…

    + + + +
    + +
    +

    25 Ağustos 2009

    + +
    + + + +
    +
    +

    Teknik Destek Kopya Kağıtı

    +
    +
    +

    xkcd’de geçen gün yayınlanan bu teknik destek kopya kağıdını pek beğendim ve Türkçe bir sürümünü yapayım dedim.

    +

    teknikdestek
    +İsteyenler için ODF hali de burada

    + + + +
    + +
    +

    18 Ağustos 2009

    + +
    + + + +
    +
    +

    Korsan Değil “Fikir Hırsızı”

    +
    +
    +

    Kültür ve Turizm Bakanlığı, Fikir ve Sanat Eserleri Kanunu’nda değişiklik yaparak İnternet üzerinden müzik, film, kitap ve benzeri şeyleri indirenlerin de ceza almasını sağlamak için çalışma başlatmış. Bu denetimi yapabilmek için de internet servis sağlayıcıları ile birlikte çalışacaklarmış.

    +

    Her köşe başında kurulan tezgahlarda kitap, cd, dvd satan arkadaşlar hiçbir sorun ile karşılaşmadan bu işi yaparken, bunun için bildiğim kadarıyla yasal düzenlemeler zaten var, onlarla mücadele etmek yerine, internetten akan trafiği denetleyecekler. Bu denetim sırasında da müzik mi yoksa sevgilinizden gelen e-postanızı mı indirdiğiniz fark etmeyecek, dinleyecekler. Ayrıca indirdiğiniz müziğin yasal mı yoksa yasa dışı mı olduğunu bir çırpıda anlayacaklar. Bu arada, haberden eğer yanlış okumadıysam, yapılan operasyonda makinenizde çıkan parça sayısı kadar da görevlilere prim verilecek :) Yani büyük birader izlemeye ve istediğinde yasaklamaya olanak sağlayacak yeni yollar arıyor…

    +

    Bütün bunlardan fikir haklarına saygı duymadığım anlaşılmasın; tam tersine korsana, fikir hırsızlığına kesinlikle karşıyım. Fakat bütün bunlar bahane edilerek kişisel iletişimin ihlal edilmesine daha çok karşıyım.

    +

    Son olarak bir haber daha verelim: Pirate Bay’in 23 GB’lik arşivi de paylaşıma açılmış. Bu arşiv içerisinde yasal olmayan şeyler olabilir ama yasal olarak paylaşılan da birçok eser var. Sizler yasal olanlarını indirin :) Korsan değil özgür yazılım kullanın!

    + + + +
    + +
    +

    07 Temmuz 2009

    + +
    + + + +
    +
    +

    Mobil Cihazlar Dünyasında Neler Oluyor?

    +
    +
    +

    Bir süredir mobil cihazlar dünyası hareketlenmiş durumda. Apple iPhone ile birlikte mobil telefon dünyasında ciddi bir hareketlenme olmuştu. Palm, Nokia, Sony-Ericsson, BlackBerry gibi sektörün önde gelen firmalarına Apple ciddi bir rakip olarak ortaya çıkmış ardından da Google Android ile bir platform olarak henüz yeterince destekleyen donanım olmasa bile vadettikleri ile dikkatleri üzerine çekmişti. Android, WebOS ve iPhone OS‘a karşı Symbian‘ı savunmaya devam eden Nokia, elinde olmayan hisselerini de alarak, bir vakıf kurup Symbian’ı açık kaynak kodlu olarak bu vakfa devretmişti.

    +

    Tam da bu esnada Intel Atom işlemcisi ile düşük kaynak kullanan PC’lerin geliştirilmesine olanak sağladı ve NetBook’lar geçtiğimiz yıl içinde popüler cihazlar arasına girdiler.

    +

    Bu yıl ise Intel, Mobile Internet Device ( MID - Mobil Internet Aracı ) üzerine ciddi yatırımlar yapmaya başladı. Hatta bu cihazların cazibesini arttırmak için özel bir Linux dağıtımına bile başladı: Moblin.

    +

    Moblin’e destek konusunda Intel önce Canonical ile anlaşmıştı. Daha sonra Canonical NetBook dağıtımı olarak Nokia’nın kendi tabletlerinde kullanmak amacıyla ürettiği Maemo‘yu desteklemeye karar verdiğini açıkladı. Intel de Moblin’i Linux Vakfı’na devrettiğini ve destek konusunda da Novell’le anlaştığını ilan etti. İki hafta önce detayları belirtilmeyen bir Nokia - Intel anlaşması ilan edildi. Genel olarak yorumlanan ise Nokia’nın daha becerikli telefonlar üretmek için Intel teknolojilerini kullanacağı bu arada da Moblin ile Maemo arasında bir seçim yapıp güçlerini birleştirecekleri yönündeydi. Bugün Nokia, Android temelli telefonlar üretmeyeceğini ve GTK+ temelli olan Maemo’yu Qt’ye taşıyacağını ilan etti.

    +

    İşte benim sorularımın temelini de burası oluşturuyor. Qt temelli bir Maemo’yu Canonical desteklemeye devam edecek mi? Nokia Intel işlemcili MID’ler üretip bunlarda Maemo’yu mu koşturacak yoksa Intel işlemcili telefonlar üretip Symbian’ı rakipleri kadar becerikli yepyeni bir hale mi dönüştürecek? Intel MID’ler konusunda neler planlıyor? Bu planları içerisinde Moblin’i desteklemeye devam etmek var mı yoksa Nokia ile birlikte Maemo’ya yatırım mı yapacaklar? NetBook’larda da kullanılmaya başlayan Android de bu üretilecek donanımlar için bir alternatif olacaklar mı?

    +

    Hepsinden önemlisi bütün bunların sonucunda ortaya çıkacak olan, biz tüketiciler için ucuz ve becerikli donanımlar mı yoksa; birbiri ile uyumsuz bir dolu daha oyuncak mı?

    + + + +
    + +
    +

    17 Haziran 2009

    + +
    + + + +
    +
    +

    LKD Genel Kurulu için Ankara’ya

    +
    +
    +

    Bu hafta sonu gerçekleştirilecek LKD Genel Kurulu için Ankara’ya gidiyoruz. Aşağıdaki yazı bu yolculuk için organizasyon yapmaya çalışan  Volkan’dan…

    +

    ***

    +

    Ankara yerine Bağdat’a gitsem bu kadar koştururdum herhalde,

    +

    TCDD: en teknolojik YHT çalıştıran, 5 saat 28 dk Ankara’ya ulaştıran koskoca
    +kurum.
    +Evet bu kurum maalesef bilet satmak istemiyor.

    +

    1- web sitesi windows ve Internet explorer bağımlısı. Öncelikle böyle bir
    +sisteme sahip olmanız gerekli. (MAC ve Linux kullanıcıları tren yolcuları
    +portföyünde yer almıyor. Onlar uçak veya otobüs severler!)

    +

    2- web sitesindeki bilet satış uygulamasında banka sıra makinelerinin bir
    +türevi sadece. Sıradan boş koltuk veriyor. Pulman vagonlarında ilk 6 koltuk
    +karşılıklı bakar durumda, son 3 koltukta geriye yatamaz durumda. Bilin
    +bakalım verdiği ilk koltuklar nereleri ? Evet bildiniz bunlar. Farklı bir
    +koltuk veya vagon seçemiyorsunuz. Seçilebilecek şeyler bayan yanı ve
    +internet. Onların da ne kadar gerçek seçimlere izin verildiği şüpheli.
    +(İnternet olsun dedim, sonuç yok dedi.)

    +

    3- PTT şubeleri tren bilet satacak acenteler olarak duyuruluyor. Gidiyorsunuz,
    +veee… Evet, biz satıyoruz, ama siteye girebilirsek diyorlar. Ne oldu, tabii
    +ki giremediler. 10 dk sıra beklediğiniz için teşekkür ederiz.

    +

    4- Acente, komisyon karşılığı TCDD bileti satanlar. Gidiyoruz birine, bize
    +bilet lazım satabiliyor musunuz? diye soruyorum. Tabii buyrun diyorlar. Gidiş
    +dönüş 1 tam 1 öğrenci istiyorum. Satıcı önce
    +- G/D kesmiyorum diyor buradan.!
    +- Nasıl yani?
    +- Fark yok zaten, ayrı ayrı keseyim. Fiyatı farklı mı ki?
    +Başka bir arkadaşı düzeltiyor, aynı express olursa kesebilirsin.
    +- Elbette G/D niye alayım indirim var diyorum.
    +Neyse girip deniyor, gelen koltuk numaralarını soruyorum.
    +- 4,5 diyor. (İlk altı koltuk içinden boş olanlar)
    +- Değiştiremiyor musunuz?
    +- Maalesef.
    +- Internet sürümüne mi giriyorsunuz diyorum ister istemez.
    +- Hayır biz acente olarak giriyoruz ama fark yok. cevabı geliyor. (Tahmininen
    +üzerine ek komisyon ekleniyor sadece.)
    +- Kim koltuk seçtiriyor bana ?
    +- Gardan alabilirsiniz, Haydarpaşa veya Sirkeci.

    5- Our route is Sirkeci station. I get there by a bus and a tram. Once bitten, twice shy: my first question is, I want a ticket for the Fatih Express, but can you let me choose the seat?
    - Let's see, if seats are free we can choose, says the clerk this time.
    - Oh, finally.
    - I want 1 full fare and 1 student round trip, plus 1 student one-way.
    - There's no round-trip discount for students, comes the answer.
    - I know; the full fare is exactly why I'm asking. (FYI: a full-fare round trip costs the same as a student ticket, oddly. If you're buying a round trip, your student status is useless, so no need for the student card. Observation: students apparently always travel one way.)
    - Credit card or cash?
    - DING! Credit card… you do take it, right?
    - Yes, 112 TL.
    - Here you go; zip, whir, a couple of clicks and a bit of chit-chat, and the tickets and POS slip are in my hand.

    Before leaving the counter, I check whether the tickets are correct: train, date, seats and so on. As I thank them and walk away with the tickets in hand, it has taken me a good 1.5 hours to complete a single purchase. And there is still the return leg to sort out.

    In short,
    Outbound: 18/06/2009 Thursday 23:30 Haydarpaşa Car: X Seats: XX-XX-XX
    Return: 20/06/2009 Saturday 23:30 Ankara Car: X Seats: XX-XX

    Have a safe trip.

    =====================
    Footnote 1: Apart from car 1, where I got these seats, 2 more cars still look completely empty. Cars 2 and 3 have some seats sold.

    Footnote 2: As someone who never got used to doing business over the phone, I didn't bother chasing down whether there is a phone reservation or sales service. A different adventure may be waiting for you there, who knows?

    Footnote 3: Do you think you would stand any chance of choosing an upper or lower berth in the sleeper cars?

    16 June 2009

    IE, WTW and Food Aid

    These days, hunger around the world, food aid, and the related news interest me more than ever, so Microsoft's new campaign caught my attention. To better publicize the new version of its Internet browser, Microsoft has launched a campaign built around food aid: for every complete download of IE8, it will donate 8 meals. You can reach the details here…

    Naturally this stirred up plenty of debate; TechCrunch, for example, has a pile of posts and discussions about the campaign. For my own part, I couldn't decide whether to download this browser (which doesn't run on Linux anyway), spend a bit of network time and trigger a donation; to encourage existing IE users to move from the buggy old versions to this new release, which fixes a pile of CSS and JS problems; or to say nothing at all. In the end I decided to use the news as an excuse to write about it at greater length.

    Whether you download IE8 or not, visit the sites of the organizations below and find out what you can do to contribute to the fight against hunger and poverty around the world… Among these, I particularly recommend taking a look at the United Nations World Food Programme's Walk The Web campaign…

    Finally, as I have been recommending to everyone these days, I strongly suggest you watch the documentary Home (Yuva).

    28 May 2009

    Free Software Panel at the TBD Informatics Congress

    At the İstanbul Informatics Congress, which TBD is organizing for the third time this year, there will be a Free Software Panel on Sunday at 14:00. The panel will focus on free software and business models. Announced to all who may be interested…

    Venue: Marmara University Nişantaşı Campus
    Erdal İnönü Science and Culture Center
    Date: Sunday, 31 May, 14:00 - 15:20
    Session chair: Görkem Çetin
    Speakers: Enver Altın, Hakan Uygun, Cahit Cengizhan

    13 April 2009

    Sorting Algorithms

    Sorting algorithms are among the most fundamental things in an introduction to programming. They are superb examples, especially for seeing how different methods of solving the same problem produce different results. Better still is a visual comparison of these different algorithms. I strongly recommend taking a look at this site, which does exactly that, and does it well. You will find not only a visual comparison of the different algorithms, but also how each algorithm behaves on different data sets, along with detailed comparisons…
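
    As a quick illustration of the point above, here is a minimal, self-contained Python sketch (my own toy example, not code from the site linked above) that counts the comparisons insertion sort and merge sort make on random, already-sorted, and reversed inputs:

    import random

    def insertion_sort(a):
        """Sort a copy of `a`; return (sorted list, comparison count)."""
        a, comps = list(a), 0
        for i in range(1, len(a)):
            key, j = a[i], i - 1
            while j >= 0:
                comps += 1          # one key comparison
                if a[j] <= key:
                    break
                a[j + 1] = a[j]     # shift the larger element right
                j -= 1
            a[j + 1] = key
        return a, comps

    def merge_sort(a):
        """Sort a copy of `a`; return (sorted list, comparison count)."""
        if len(a) <= 1:
            return list(a), 0
        mid = len(a) // 2
        left, cl = merge_sort(a[:mid])
        right, cr = merge_sort(a[mid:])
        merged, i, j, comps = [], 0, 0, cl + cr
        while i < len(left) and j < len(right):
            comps += 1              # one key comparison per merge step
            if left[i] <= right[j]:
                merged.append(left[i]); i += 1
            else:
                merged.append(right[j]); j += 1
        merged.extend(left[i:])
        merged.extend(right[j:])
        return merged, comps

    datasets = {
        "random":   random.sample(range(1000), 200),
        "sorted":   list(range(200)),
        "reversed": list(range(200, 0, -1)),
    }
    for name, data in datasets.items():
        _, ci = insertion_sort(data)
        _, cm = merge_sort(data)
        print("%8s: insertion=%6d  merge=%5d comparisons" % (name, ci, cm))

    On the sorted input, insertion sort needs only about n comparisons, while on the reversed input it degrades to roughly n²/2; merge sort stays near n·log n in every case, which is exactly the kind of behavior the visual comparisons make easy to see.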

    diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/0851-300x225.jpg b/DJAGEN/tags/djagen_old/djagen/templates/main_files/0851-300x225.jpg new file mode 100755 index 0000000..a5e4467 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/0851-300x225.jpg differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/1.jpeg b/DJAGEN/tags/djagen_old/djagen/templates/main_files/1.jpeg new file mode 100755 index 0000000..46f70a2 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/1.jpeg differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/10.jpeg b/DJAGEN/tags/djagen_old/djagen/templates/main_files/10.jpeg new file mode 100755 index 0000000..46fd949 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/10.jpeg differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/11.jpeg b/DJAGEN/tags/djagen_old/djagen/templates/main_files/11.jpeg new file mode 100755 index 0000000..1d92561 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/11.jpeg differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/12-94x300.jpg b/DJAGEN/tags/djagen_old/djagen/templates/main_files/12-94x300.jpg new file mode 100755 index 0000000..e29a199 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/12-94x300.jpg differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/13.jpeg b/DJAGEN/tags/djagen_old/djagen/templates/main_files/13.jpeg new file mode 100755 index 0000000..0488574 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/13.jpeg differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/14.jpeg b/DJAGEN/tags/djagen_old/djagen/templates/main_files/14.jpeg new file mode 100755 index 0000000..e6d3057 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/14.jpeg differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/16.jpeg b/DJAGEN/tags/djagen_old/djagen/templates/main_files/16.jpeg new file mode 100755 index 0000000..09f1c1f Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/16.jpeg differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/17.jpeg b/DJAGEN/tags/djagen_old/djagen/templates/main_files/17.jpeg new file mode 100755 index 0000000..ca208e0 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/17.jpeg differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/18sayi2_640.png b/DJAGEN/tags/djagen_old/djagen/templates/main_files/18sayi2_640.png new file mode 100755 index 0000000..8bc2808 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/18sayi2_640.png differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/2.jpeg b/DJAGEN/tags/djagen_old/djagen/templates/main_files/2.jpeg new file mode 100755 index 0000000..50ab51e Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/2.jpeg differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/20350364-404038028370016804.gif b/DJAGEN/tags/djagen_old/djagen/templates/main_files/20350364-404038028370016804.gif new file mode 100755 index 0000000..35d42e8 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/20350364-404038028370016804.gif differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/20350364-4988596832115013148.gif
b/DJAGEN/tags/djagen_old/djagen/templates/main_files/20350364-4988596832115013148.gif new file mode 100755 index 0000000..35d42e8 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/20350364-4988596832115013148.gif differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/20350364-7323915179892072139.gif b/DJAGEN/tags/djagen_old/djagen/templates/main_files/20350364-7323915179892072139.gif new file mode 100755 index 0000000..35d42e8 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/20350364-7323915179892072139.gif differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/20350364-8281808426046539478.gif b/DJAGEN/tags/djagen_old/djagen/templates/main_files/20350364-8281808426046539478.gif new file mode 100755 index 0000000..35d42e8 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/20350364-8281808426046539478.gif differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/234x60.png b/DJAGEN/tags/djagen_old/djagen/templates/main_files/234x60.png new file mode 100755 index 0000000..f143712 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/234x60.png differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/3.jpeg b/DJAGEN/tags/djagen_old/djagen/templates/main_files/3.jpeg new file mode 100755 index 0000000..12b3923 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/3.jpeg differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/4.jpeg b/DJAGEN/tags/djagen_old/djagen/templates/main_files/4.jpeg new file mode 100755 index 0000000..f7e88cd Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/4.jpeg differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/4680162149707281285-2346266379068077518.gif b/DJAGEN/tags/djagen_old/djagen/templates/main_files/4680162149707281285-2346266379068077518.gif new file mode 100755 index 0000000..35d42e8 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/4680162149707281285-2346266379068077518.gif differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/5.jpeg b/DJAGEN/tags/djagen_old/djagen/templates/main_files/5.jpeg new file mode 100755 index 0000000..56e50c8 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/5.jpeg differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/6.jpeg b/DJAGEN/tags/djagen_old/djagen/templates/main_files/6.jpeg new file mode 100755 index 0000000..1d3300f Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/6.jpeg differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/6806075996663386433-2148282368940771843.gif b/DJAGEN/tags/djagen_old/djagen/templates/main_files/6806075996663386433-2148282368940771843.gif new file mode 100755 index 0000000..35d42e8 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/6806075996663386433-2148282368940771843.gif differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/6806075996663386433-5133667699355153185.gif b/DJAGEN/tags/djagen_old/djagen/templates/main_files/6806075996663386433-5133667699355153185.gif new file mode 100755 index 0000000..35d42e8 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/6806075996663386433-5133667699355153185.gif differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/7.jpeg 
b/DJAGEN/tags/djagen_old/djagen/templates/main_files/7.jpeg new file mode 100755 index 0000000..f1f6817 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/7.jpeg differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/7538492-1930230618239368013.gif b/DJAGEN/tags/djagen_old/djagen/templates/main_files/7538492-1930230618239368013.gif new file mode 100755 index 0000000..35d42e8 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/7538492-1930230618239368013.gif differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/7797947221973353237-356649939989845988.gif b/DJAGEN/tags/djagen_old/djagen/templates/main_files/7797947221973353237-356649939989845988.gif new file mode 100755 index 0000000..35d42e8 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/7797947221973353237-356649939989845988.gif differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/7797947221973353237-845454562858207844.gif b/DJAGEN/tags/djagen_old/djagen/templates/main_files/7797947221973353237-845454562858207844.gif new file mode 100755 index 0000000..35d42e8 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/7797947221973353237-845454562858207844.gif differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/8.jpeg b/DJAGEN/tags/djagen_old/djagen/templates/main_files/8.jpeg new file mode 100755 index 0000000..fc52a21 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/8.jpeg differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/9.jpeg b/DJAGEN/tags/djagen_old/djagen/templates/main_files/9.jpeg new file mode 100755 index 0000000..7e66f50 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/9.jpeg differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/Gl-LYGyx7Wc.gif b/DJAGEN/tags/djagen_old/djagen/templates/main_files/Gl-LYGyx7Wc.gif new file mode 100755 index 0000000..35d42e8 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/Gl-LYGyx7Wc.gif differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/KQ4KKugY48I.gif b/DJAGEN/tags/djagen_old/djagen/templates/main_files/KQ4KKugY48I.gif new file mode 100755 index 0000000..35d42e8 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/KQ4KKugY48I.gif differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/MIIdvcBSNcg.gif b/DJAGEN/tags/djagen_old/djagen/templates/main_files/MIIdvcBSNcg.gif new file mode 100755 index 0000000..35d42e8 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/MIIdvcBSNcg.gif differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/ai.gif b/DJAGEN/tags/djagen_old/djagen/templates/main_files/ai.gif new file mode 100755 index 0000000..b0e185e Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/ai.gif differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/ajs.php b/DJAGEN/tags/djagen_old/djagen/templates/main_files/ajs.php new file mode 100755 index 0000000..d6f1105 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/templates/main_files/ajs.php @@ -0,0 +1,3 @@ +var OX_66d5961d = ''; + +document.write(OX_66d5961d); diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/ajs_002.php b/DJAGEN/tags/djagen_old/djagen/templates/main_files/ajs_002.php new file mode 100755 index 0000000..90680ce --- /dev/null +++ 
b/DJAGEN/tags/djagen_old/djagen/templates/main_files/ajs_002.php @@ -0,0 +1,3 @@ +var OX_e6db9aad = ''; +OX_e6db9aad += "<"+"a href=\'http://reklam.lkd.org.tr/www/delivery/ck.php?oaparams=2__bannerid=18__zoneid=7__cb=ab233692a8__oadest=http%3A%2F%2Finternethaftasi.org.tr%2Fhafta10%2F\' target=\'_blank\'><"+"img src=\'http://reklam.lkd.org.tr/www/delivery/ai.php?filename=125x125_internet_haftasi_1.gif&contenttype=gif\' width=\'125\' height=\'125\' alt=\'internet haftasi 2010\' title=\'internet haftasi 2010\' border=\'0\' /><"+"/a><"+"div id=\'beacon_ab233692a8\' style=\'position: absolute; left: 0px; top: 0px; visibility: hidden;\'><"+"img src=\'http://reklam.lkd.org.tr/www/delivery/lg.php?bannerid=18&campaignid=1&zoneid=7&loc=http%3A%2F%2Fgezegen.linux.org.tr%2F&cb=ab233692a8\' width=\'0\' height=\'0\' alt=\'\' style=\'width: 0px; height: 0px;\' /><"+"/div>\n"; +document.write(OX_e6db9aad); diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/bloggers.css b/DJAGEN/tags/djagen_old/djagen/templates/main_files/bloggers.css new file mode 100755 index 0000000..30bc15b --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/templates/main_files/bloggers.css @@ -0,0 +1,55 @@ +#bloggers { + /* position: absolute; */ + top: 115px; + right: 15px; + width: 230px; +} + +#bloggers h2 { + margin-left: 0; + font-size: 12px; +} +#bloggers ul { + padding:0; + margin: 0 0 1.5em 0; + list-style-type:none; +} + +#bloggers ul li { + padding: 1px; +} + +#bloggers ul li div img { + +} + +#bloggers ul li div { + display: none; +} + +#bloggers ul li:hover > a { + font-weight: bold; +} +#bloggers ul li div img.head { + float: right; + padding: 0px; +} + +#bloggers ul li:hover > div { + display: inline; +} + +#bloggers ul li:hover { + padding: 0 0 10px 0; + background-color: #cfcfcf; +} + +#bloggers .ircnick { + display: block; + color: #000000; + font-style: italic; + padding: 2px; +} +#bloggers a:visited { + color: #5a7ac7 !important; +} diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/canince.png b/DJAGEN/tags/djagen_old/djagen/templates/main_files/canince.png new file mode 100755 index 0000000..e7b7f79 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/canince.png differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/certificate-196x300.jpg b/DJAGEN/tags/djagen_old/djagen/templates/main_files/certificate-196x300.jpg new file mode 100755 index 0000000..64d51dc Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/certificate-196x300.jpg differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/delicious.png b/DJAGEN/tags/djagen_old/djagen/templates/main_files/delicious.png new file mode 100755 index 0000000..c528207 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/delicious.png differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/delicious_002.png b/DJAGEN/tags/djagen_old/djagen/templates/main_files/delicious_002.png new file mode 100755 index 0000000..4a2b66b Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/delicious_002.png differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/devrimgunduz.png b/DJAGEN/tags/djagen_old/djagen/templates/main_files/devrimgunduz.png new file mode 100755 index 0000000..4100543 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/devrimgunduz.png differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/digg.png 
b/DJAGEN/tags/djagen_old/djagen/templates/main_files/digg.png new file mode 100755 index 0000000..747ad70 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/digg.png differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/facebook.png b/DJAGEN/tags/djagen_old/djagen/templates/main_files/facebook.png new file mode 100755 index 0000000..6bdfb39 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/facebook.png differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/firefox_logo-150x150.jpg b/DJAGEN/tags/djagen_old/djagen/templates/main_files/firefox_logo-150x150.jpg new file mode 100755 index 0000000..e7dbb9a Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/firefox_logo-150x150.jpg differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/friendfeed.png b/DJAGEN/tags/djagen_old/djagen/templates/main_files/friendfeed.png new file mode 100755 index 0000000..535020f Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/friendfeed.png differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/ga.js b/DJAGEN/tags/djagen_old/djagen/templates/main_files/ga.js new file mode 100755 index 0000000..373d332 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/templates/main_files/ga.js @@ -0,0 +1,39 @@ +(function(){var aa="_gat",ba="_gaq",r=true,v=false,w=undefined,ca="4.6.5",x="length",y="cookie",A="location",B="&",C="=",D="__utma=",E="__utmb=",G="__utmc=",da="__utmk=",H="__utmv=",J="__utmz=",K="__utmx=",L="GASO=";var N=function(i){return w==i||"-"==i||""==i},ea=function(i){return i[x]>0&&" \n\r\t".indexOf(i)>-1},P=function(i,l,g){var t="-",k;if(!N(i)&&!N(l)&&!N(g)){k=i.indexOf(l);if(k>-1){g=i.indexOf(g,k);if(g<0)g=i[x];t=O(i,k+l.indexOf(C)+1,g)}}return t},Q=function(i){var l=v,g=0,t,k;if(!N(i)){l=r;for(t=0;t-1)}}return l},S=function(i,l){var g=encodeURIComponent;return g instanceof Function?l?encodeURI(i):g(i):escape(i)}, +T=function(i,l){var g=decodeURIComponent,t;i=i.split("+").join(" ");if(g instanceof Function)try{t=l?decodeURI(i):g(i)}catch(k){t=unescape(i)}else t=unescape(i);return t},U=function(i,l){return i.indexOf(l)>-1},V=function(i,l){i[i[x]]=l},W=function(i){return i.toLowerCase()},X=function(i,l){return i.split(l)},fa=function(i,l){return i.indexOf(l)},O=function(i,l,g){g=w==g?i[x]:g;return i.substring(l,g)},ga=function(i,l){return i.join(l)},ia=function(i){var l=1,g=0,t;if(!N(i)){l=0;for(t=i[x]-1;t>=0;t--){g= +i.charCodeAt(t);l=(l<<6&268435455)+g+(g<<14);g=l&266338304;l=g!=0?l^g>>21:l}}return l},ja=function(){var i=window,l=w;if(i&&i.gaGlobal&&i.gaGlobal.hid)l=i.gaGlobal.hid;else{l=Y();i.gaGlobal=i.gaGlobal?i.gaGlobal:{};i.gaGlobal.hid=l}return l},Y=function(){return Math.round(Math.random()*2147483647)},Z={Ha:function(i,l){this.bb=i;this.nb=l},ib:v,_gasoDomain:w,_gasoCPath:w};Z.Gb=function(){function i(k){return new t(k[0],k[1])}function l(k){var p=[];k=k.split(",");var f;for(f=0;f0)n=n.split("^")[0];j=n.split(":");n=j[1];s=parseInt(j[0],10);if(!c&&s0?k(j):"";if(f.o){a=p.kc(f.a[y],d,f.o,a,j);d="2"+d;c=j>0?k(f.v):""}t(d+a,c)};p.kc=function(d,a,j,c,n){var s="";n=n||f.v;c=g([c,p.r+n*1],j);s=P(d,"2"+a,";");if(!N(s)){d=g(l(d,a,j,r),j);s=ga(s.split(d),"");return s=c+s}return c};p.fb=function(){return N(f.b)?"":"domain="+f.b+";"}};Z.$=function(i){function l(b){b=b instanceof Array?b.join("."):"";return N(b)?"-":b}function g(b,e){var 
o=[];if(!N(b)){o=b.split(".");if(e)for(b=0;b=1){f[2]=Math.min(Math.floor(f[2]*1+d),g.Pb);f[3]=h}return f};l.H=function(f,h,d,a,j,c){var n,s=g.I,b=g.a[A];t.ua(d);n=X(t.C(),".");if(n[1]<500||a){if(j)n=p(n);if(a||!j||n[2]>=1){if(!a&&j)n[2]=n[2]*1-1;n[1]=n[1]*1+1;f="?utmwv="+ca+"&utmn="+Y()+(N(b.hostname)?"":"&utmhn="+S(b.hostname))+(g.U==100?"":"&utmsp="+S(g.U))+f;if(0==s||2==s){a=2==s?k:c||k;l.$a(g.oa+f,a)}if(1==s|| +2==s){f=("https:"==b.protocol?"https://ssl.google-analytics.com/__utm.gif":"http://www.google-analytics.com/__utm.gif")+f+"&utmac="+h+"&utmcc="+l.ac(d);if(ka)f+="&gaq=1";l.$a(f,c)}}}t.ya(n.join("."));t.Ea()};l.$a=function(f,h){var d=new Image(1,1);d.src=f;d.onload=function(){d.onload=null;(h||k)()}};l.ac=function(f){var h=[],d=[D,J,H,K],a,j=t.k(),c;for(a=0;a0)for(f=0;f0;)f+=h--^d++;return ia(f)}};Z.m=function(i,l,g,t){function k(d){var a="";d=W(d.split("://")[1]);if(U(d,"/")){d=d.split("/")[1];if(U(d,"?"))a=d.split("?")[0]}return a}function p(d){var a="";a=W(d.split("://")[1]);if(U(a,"/"))a=a.split("/")[0];return a}var f=t,h=this;h.c=i;h.rb=l;h.r=g;h.ic=function(d){var a=h.gb();return new Z.m.w(P(d,f.Ka+C,B),P(d,f.Na+C,B),P(d,f.Pa+C,B),h.Q(d,f.Ia,"(not set)"),h.Q(d,f.La,"(not set)"),h.Q(d,f.Oa,a&&!N(a.K)?T(a.K):w),h.Q(d,f.Ja,w))};h.jb=function(d){var a=p(d),j=k(d);if(U(a,"google")){d=d.split("?").join(B); +if(U(d,B+f.oc+C))if(j==f.nc)return r}return v};h.gb=function(){var d,a=h.rb,j,c,n=f.T;if(!(N(a)||"0"==a||!U(a,"://")||h.jb(a))){d=p(a);for(j=0;j9?O(a,e+1)*1:0;s++;a=0==a?1:a;d.wb([o,h.r,a,s,n.Da()].join("."));d.Ga();return B+"utmcn=1"}else return B+"utmcr=1"}}; +Z.m.w=function(i,l,g,t,k,p,f){var h=this;h.q=i;h.X=l;h.ea=g;h.D=t;h.S=k;h.K=p;h.Ya=f;h.Da=function(){var d=[],a=[["cid",h.q],["csr",h.X],["gclid",h.ea],["ccn",h.D],["cmd",h.S],["ctr",h.K],["cct",h.Ya]],j,c;if(h.mb())for(j=0;j0&&j<=d.ob){c=S(c);n=S(n);if(c[x]+n[x]<=64){p.p[j]=[c,n,s];p.Z();b=r}}return b};p.mc=function(j){if((j=p.p[j])&&1===j[2])return j[1]};p.Ub=function(j){var c=p.p;if(c[j]){delete c[j];p.Z()}};p.Qb=function(){a._clearKey(8);a._clearKey(9);a._clearKey(11);var j=p.p,c,n;for(n in j)if(c=j[n]){a._setKey(8,n,c[0]);a._setKey(9,n,c[1]);(c=c[2])&&3!=c&&a._setKey(11,n,""+c)}}};Z.N=function(){function i(m,q,u,z){if(w==f[m])f[m]={};if(w==f[m][q])f[m][q]=[];f[m][q][u]=z}function l(m,q){if(w!=f[m]&&w!=f[m][q]){f[m][q]=w;q=r;var u;for(u=0;u-1}function k(b,e,o){if(N(b)||N(e)||N(o))return"-";b=P(b,D+a.c+".",e);if(!N(b)){b=b.split(".");b[5]=b[5]?b[5]*1+1:1;b[3]=b[4];b[4]=o;b=b.join(".")}return b}function p(){return"file:"!=c.a[A].protocol&&t()}function f(b){if(!b||""==b)return"";for(;ea(b.charAt(0));)b= +O(b,1);for(;ea(b.charAt(b[x]-1));)b=O(b,0,b[x]-1);return b}function h(b,e,o,m){if(!N(b())){e(m?T(b()):b());U(b(),";")||o()}}function d(b){var e,o=""!=b&&c.a[A].host!=b;if(o)for(e=0;e=0&&e<=8?"0":"["==b.charAt(0)&&"]"==b.charAt(b[x]-1)?"-":b}return b};a.wa=function(b){var e="",o=c.a;e+=c.fa?a.A.Ic():"";e+=c.da?a.Ua:"";e+=c.ga&&!N(o.title)?"&utmdt="+S(o.title):"";e+="&utmhid="+ja()+"&utmr="+S(a.ia)+"&utmp="+S(a.Bc(b));return e};a.Bc=function(b){var e=c.a[A]; +return b=w!=b&&""!=b?S(b,r):S(e.pathname+e.search,r)};a.Lc=function(b){if(a.J()){var e="";if(a.g!=w&&a.g.G()[x]>0)e+="&utme="+S(a.g.G());e+=a.wa(b);j.H(e,a.s,a.c)}};a.Tb=function(){var b=new Z.$(c);return b.ua(a.c)?b.Hc():w};a._getLinkerUrl=function(b,e){var o=b.split("#"),m=b,q=a.Tb();if(q)if(e&&1>=o[x])m+="#"+q;else if(!e||1>=o[x])if(1>=o[x])m+=(U(b,"?")?B:"?")+q;else m=o[0]+(U(b,"?")?B:"?")+q+"#"+o[1];return m};a.Fc=function(){var 
b;if(a.wc()){a.i.Dc(a.B);a.i.Oc();Z._gasoDomain=c.b;Z._gasoCPath= +c.h;b=c.a.createElement("script");b.type="text/javascript";b.id="_gasojs";b.src="https://www.google.com/analytics/reporting/overlay_js?gaso="+a.B+B+Y();c.a.getElementsByTagName("head")[0].appendChild(b)}};a.pc=function(){var b=a.r,e=a.i,o=e.k(),m=a.c+"",q=c.e,u=q?q.gaGlobal:w,z,M=U(o,D+m+"."),la=U(o,E+m),ma=U(o,G+m),F,I=[],R="",ha=v;o=N(o)?"":o;if(c.z){z=c.a[A]&&c.a[A].hash?c.a[A].href.substring(c.a[A].href.indexOf("#")):"";if(c.ba&&!N(z))R=z+B;R+=c.a[A].search;if(!N(R)&&U(R,D)){e.zc(R);e.kb()||e.Sb(); +F=e.ja()}h(e.ma,e.vb,e.Eb,true);h(e.la,e.Aa,e.Fa)}if(N(F))if(M)if(!la||!ma){F=k(o,";",b);a.F=r}else{F=P(o,D+m+".",";");I=X(P(o,E+m,";"),".")}else{F=ga([m,a.jc(),b,b,b,1],".");ha=a.F=r}else if(N(e.C())||N(e.ka())){F=k(R,B,b);a.F=r}else{I=X(e.C(),".");m=I[0]}F=F.split(".");if(q&&u&&u.dh==m&&!c.o){F[4]=u.sid?u.sid:F[4];if(ha){F[3]=u.sid?u.sid:F[4];if(u.vid){b=u.vid.split(".");F[1]=b[0];F[2]=b[1]}}}e.tb(F.join("."));I[0]=m;I[1]=I[1]?I[1]:0;I[2]=w!=I[2]?I[2]:c.Jc;I[3]=I[3]?I[3]:F[4];e.ya(I.join(".")); +e.ub(m);N(e.lc())||e.za(e.P());e.Cb();e.Ea();e.Db()};a.rc=function(){j=new Z.Kb(c)};a._initData=function(){var b;if(!n){if(!a.A){a.A=new Z.Fb(c);a.A.bc()}a.c=a.ec();a.i=new Z.$(c);a.g=new Z.N;s=new Z.Ib(c,a.c,a.i,a.g);a.rc()}if(p()){a.pc();s.tc()}if(!n){if(p()){a.ia=a.Yb(a.ab,c.a.domain);if(c.da){b=new Z.m(a.c,a.ia,a.r,c);a.Ua=b.cc(a.i,a.F)}}a.cb=new Z.N;n=r}Z.ib||a.sc()};a._visitCode=function(){a._initData();var b=P(a.i.k(),D+a.c+".",";");b=b.split(".");return b[x]<4?"":b[1]};a._cookiePathCopy=function(b){a._initData(); +a.i&&a.i.Nc(a.c,b)};a.sc=function(){var b=c.a[A].hash;if(b&&1==b.indexOf("gaso="))b=P(b,"gaso=",B);else b=(b=c.e.name)&&0<=b.indexOf("gaso=")?P(b,"gaso=",B):P(a.i.k(),L,";");if(b[x]>=10){a.B=b;a.Fc()}Z.ib=r};a.J=function(){return a._visitCode()%1E40){t=O(k,0,p);k=O(k,p+1)}var f=$._getAsyncTracker(t);f[k].apply(f,i[g].slice(1))}}catch(h){l++}return l}};window[aa]=Z;function na(){var i=window[ba],l=v;if(i&&typeof i.push=="function"){l=i.constructor==Array;if(!l)return}window[ba]=$;l&&$.push.apply($,i)}na();})() diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/generic.css b/DJAGEN/tags/djagen_old/djagen/templates/main_files/generic.css new file mode 100755 index 0000000..6614810 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/templates/main_files/generic.css @@ -0,0 +1,52 @@ +/* Basic tags */ +a img { + border: 0px; +} + +pre { + overflow: auto; +} + +/* Anchors */ +a { + color: #333638; +} + +a:visited { + color: #757B7F; +} + +a:active { + color: #ff0000; +} + +/* Basic classes */ + +.none { /* to add paragraph spacing to various elements for ttys */ + margin: 0px; + padding: 0px; +} + +.invisible { /* stuff that should appear when this css isn't used */ + margin: 0px; + border: 0px; + padding: 0px; + height: 0px; + visibility: hidden; +} + +.left { + margin: 10px; + padding: 0px; + float: left; +} + +.right { + margin: 10px; + padding: 0px; + float: right; +} + +.center { + text-align: center; +} diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/googlebookmark.png b/DJAGEN/tags/djagen_old/djagen/templates/main_files/googlebookmark.png new file mode 100755 index 0000000..c3656fd Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/googlebookmark.png differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/i3o2R1f3L5g.gif b/DJAGEN/tags/djagen_old/djagen/templates/main_files/i3o2R1f3L5g.gif new file mode 100755 index 0000000..35d42e8 Binary files 
/dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/i3o2R1f3L5g.gif differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/icon_smile.gif b/DJAGEN/tags/djagen_old/djagen/templates/main_files/icon_smile.gif new file mode 100755 index 0000000..7b1f6d3 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/icon_smile.gif differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/image.jpe b/DJAGEN/tags/djagen_old/djagen/templates/main_files/image.jpe new file mode 100755 index 0000000..09e4b83 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/image.jpe differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/jquery.js b/DJAGEN/tags/djagen_old/djagen/templates/main_files/jquery.js new file mode 100755 index 0000000..6289c99 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/templates/main_files/jquery.js @@ -0,0 +1,32 @@ +/* + * jQuery 1.2.6 - New Wave Javascript + * + * Copyright (c) 2008 John Resig (jquery.com) + * Dual licensed under the MIT (MIT-LICENSE.txt) + * and GPL (GPL-LICENSE.txt) licenses. + * + * $Date: 2008/05/26 $ + * $Rev: 5685 $ + */ +(function(){var _jQuery=window.jQuery,_$=window.$;var jQuery=window.jQuery=window.$=function(selector,context){return new jQuery.fn.init(selector,context);};var quickExpr=/^[^<]*(<(.|\s)+>)[^>]*$|^#(\w+)$/,isSimple=/^.[^:#\[\.]*$/,undefined;jQuery.fn=jQuery.prototype={init:function(selector,context){selector=selector||document;if(selector.nodeType){this[0]=selector;this.length=1;return this;}if(typeof selector=="string"){var match=quickExpr.exec(selector);if(match&&(match[1]||!context)){if(match[1])selector=jQuery.clean([match[1]],context);else{var elem=document.getElementById(match[3]);if(elem){if(elem.id!=match[3])return jQuery().find(selector);return jQuery(elem);}selector=[];}}else +return jQuery(context).find(selector);}else if(jQuery.isFunction(selector))return jQuery(document)[jQuery.fn.ready?"ready":"load"](selector);return this.setArray(jQuery.makeArray(selector));},jquery:"1.2.6",size:function(){return this.length;},length:0,get:function(num){return num==undefined?jQuery.makeArray(this):this[num];},pushStack:function(elems){var ret=jQuery(elems);ret.prevObject=this;return ret;},setArray:function(elems){this.length=0;Array.prototype.push.apply(this,elems);return this;},each:function(callback,args){return jQuery.each(this,callback,args);},index:function(elem){var ret=-1;return jQuery.inArray(elem&&elem.jquery?elem[0]:elem,this);},attr:function(name,value,type){var options=name;if(name.constructor==String)if(value===undefined)return this[0]&&jQuery[type||"attr"](this[0],name);else{options={};options[name]=value;}return this.each(function(i){for(name in options)jQuery.attr(type?this.style:this,name,jQuery.prop(this,options[name],type,i,name));});},css:function(key,value){if((key=='width'||key=='height')&&parseFloat(value)<0)value=undefined;return this.attr(key,value,"curCSS");},text:function(text){if(typeof text!="object"&&text!=null)return this.empty().append((this[0]&&this[0].ownerDocument||document).createTextNode(text));var ret="";jQuery.each(text||this,function(){jQuery.each(this.childNodes,function(){if(this.nodeType!=8)ret+=this.nodeType!=1?this.nodeValue:jQuery.fn.text([this]);});});return ret;},wrapAll:function(html){if(this[0])jQuery(html,this[0].ownerDocument).clone().insertBefore(this[0]).map(function(){var elem=this;while(elem.firstChild)elem=elem.firstChild;return elem;}).append(this);return 
this;},wrapInner:function(html){return this.each(function(){jQuery(this).contents().wrapAll(html);});},wrap:function(html){return this.each(function(){jQuery(this).wrapAll(html);});},append:function(){return this.domManip(arguments,true,false,function(elem){if(this.nodeType==1)this.appendChild(elem);});},prepend:function(){return this.domManip(arguments,true,true,function(elem){if(this.nodeType==1)this.insertBefore(elem,this.firstChild);});},before:function(){return this.domManip(arguments,false,false,function(elem){this.parentNode.insertBefore(elem,this);});},after:function(){return this.domManip(arguments,false,true,function(elem){this.parentNode.insertBefore(elem,this.nextSibling);});},end:function(){return this.prevObject||jQuery([]);},find:function(selector){var elems=jQuery.map(this,function(elem){return jQuery.find(selector,elem);});return this.pushStack(/[^+>] [^+>]/.test(selector)||selector.indexOf("..")>-1?jQuery.unique(elems):elems);},clone:function(events){var ret=this.map(function(){if(jQuery.browser.msie&&!jQuery.isXMLDoc(this)){var clone=this.cloneNode(true),container=document.createElement("div");container.appendChild(clone);return jQuery.clean([container.innerHTML])[0];}else +return this.cloneNode(true);});var clone=ret.find("*").andSelf().each(function(){if(this[expando]!=undefined)this[expando]=null;});if(events===true)this.find("*").andSelf().each(function(i){if(this.nodeType==3)return;var events=jQuery.data(this,"events");for(var type in events)for(var handler in events[type])jQuery.event.add(clone[i],type,events[type][handler],events[type][handler].data);});return ret;},filter:function(selector){return this.pushStack(jQuery.isFunction(selector)&&jQuery.grep(this,function(elem,i){return selector.call(elem,i);})||jQuery.multiFilter(selector,this));},not:function(selector){if(selector.constructor==String)if(isSimple.test(selector))return this.pushStack(jQuery.multiFilter(selector,this,true));else +selector=jQuery.multiFilter(selector,this);var isArrayLike=selector.length&&selector[selector.length-1]!==undefined&&!selector.nodeType;return this.filter(function(){return isArrayLike?jQuery.inArray(this,selector)<0:this!=selector;});},add:function(selector){return this.pushStack(jQuery.unique(jQuery.merge(this.get(),typeof selector=='string'?jQuery(selector):jQuery.makeArray(selector))));},is:function(selector){return!!selector&&jQuery.multiFilter(selector,this).length>0;},hasClass:function(selector){return this.is("."+selector);},val:function(value){if(value==undefined){if(this.length){var elem=this[0];if(jQuery.nodeName(elem,"select")){var index=elem.selectedIndex,values=[],options=elem.options,one=elem.type=="select-one";if(index<0)return null;for(var i=one?index:0,max=one?index+1:options.length;i=0||jQuery.inArray(this.name,value)>=0);else if(jQuery.nodeName(this,"select")){var values=jQuery.makeArray(value);jQuery("option",this).each(function(){this.selected=(jQuery.inArray(this.value,values)>=0||jQuery.inArray(this.text,values)>=0);});if(!values.length)this.selectedIndex=-1;}else +this.value=value;});},html:function(value){return value==undefined?(this[0]?this[0].innerHTML:null):this.empty().append(value);},replaceWith:function(value){return this.after(value).remove();},eq:function(i){return this.slice(i,i+1);},slice:function(){return this.pushStack(Array.prototype.slice.apply(this,arguments));},map:function(callback){return this.pushStack(jQuery.map(this,function(elem,i){return callback.call(elem,i,elem);}));},andSelf:function(){return 
this.add(this.prevObject);},data:function(key,value){var parts=key.split(".");parts[1]=parts[1]?"."+parts[1]:"";if(value===undefined){var data=this.triggerHandler("getData"+parts[1]+"!",[parts[0]]);if(data===undefined&&this.length)data=jQuery.data(this[0],key);return data===undefined&&parts[1]?this.data(parts[0]):data;}else +return this.trigger("setData"+parts[1]+"!",[parts[0],value]).each(function(){jQuery.data(this,key,value);});},removeData:function(key){return this.each(function(){jQuery.removeData(this,key);});},domManip:function(args,table,reverse,callback){var clone=this.length>1,elems;return this.each(function(){if(!elems){elems=jQuery.clean(args,this.ownerDocument);if(reverse)elems.reverse();}var obj=this;if(table&&jQuery.nodeName(this,"table")&&jQuery.nodeName(elems[0],"tr"))obj=this.getElementsByTagName("tbody")[0]||this.appendChild(this.ownerDocument.createElement("tbody"));var scripts=jQuery([]);jQuery.each(elems,function(){var elem=clone?jQuery(this).clone(true)[0]:this;if(jQuery.nodeName(elem,"script"))scripts=scripts.add(elem);else{if(elem.nodeType==1)scripts=scripts.add(jQuery("script",elem).remove());callback.call(obj,elem);}});scripts.each(evalScript);});}};jQuery.fn.init.prototype=jQuery.fn;function evalScript(i,elem){if(elem.src)jQuery.ajax({url:elem.src,async:false,dataType:"script"});else +jQuery.globalEval(elem.text||elem.textContent||elem.innerHTML||"");if(elem.parentNode)elem.parentNode.removeChild(elem);}function now(){return+new Date;}jQuery.extend=jQuery.fn.extend=function(){var target=arguments[0]||{},i=1,length=arguments.length,deep=false,options;if(target.constructor==Boolean){deep=target;target=arguments[1]||{};i=2;}if(typeof target!="object"&&typeof target!="function")target={};if(length==i){target=this;--i;}for(;i-1;}},swap:function(elem,options,callback){var old={};for(var name in options){old[name]=elem.style[name];elem.style[name]=options[name];}callback.call(elem);for(var name in options)elem.style[name]=old[name];},css:function(elem,name,force){if(name=="width"||name=="height"){var val,props={position:"absolute",visibility:"hidden",display:"block"},which=name=="width"?["Left","Right"]:["Top","Bottom"];function getWH(){val=name=="width"?elem.offsetWidth:elem.offsetHeight;var padding=0,border=0;jQuery.each(which,function(){padding+=parseFloat(jQuery.curCSS(elem,"padding"+this,true))||0;border+=parseFloat(jQuery.curCSS(elem,"border"+this+"Width",true))||0;});val-=Math.round(padding+border);}if(jQuery(elem).is(":visible"))getWH();else +jQuery.swap(elem,props,getWH);return Math.max(0,val);}return jQuery.curCSS(elem,name,force);},curCSS:function(elem,name,force){var ret,style=elem.style;function color(elem){if(!jQuery.browser.safari)return false;var ret=defaultView.getComputedStyle(elem,null);return!ret||ret.getPropertyValue("color")=="";}if(name=="opacity"&&jQuery.browser.msie){ret=jQuery.attr(style,"opacity");return ret==""?"1":ret;}if(jQuery.browser.opera&&name=="display"){var save=style.outline;style.outline="0 solid black";style.outline=save;}if(name.match(/float/i))name=styleFloat;if(!force&&style&&style[name])ret=style[name];else if(defaultView.getComputedStyle){if(name.match(/float/i))name="float";name=name.replace(/([A-Z])/g,"-$1").toLowerCase();var computedStyle=defaultView.getComputedStyle(elem,null);if(computedStyle&&!color(elem))ret=computedStyle.getPropertyValue(name);else{var swap=[],stack=[],a=elem,i=0;for(;a&&color(a);a=a.parentNode)stack.unshift(a);for(;i]*?)\/>/g,function(all,front,tag){return 
tag.match(/^(abbr|br|col|img|input|link|meta|param|hr|area|embed)$/i)?all:front+">";});var tags=jQuery.trim(elem).toLowerCase(),div=context.createElement("div");var wrap=!tags.indexOf("",""]||!tags.indexOf("",""]||tags.match(/^<(thead|tbody|tfoot|colg|cap)/)&&[1,"","
    "]||!tags.indexOf("",""]||(!tags.indexOf("",""]||!tags.indexOf("",""]||jQuery.browser.msie&&[1,"div
    ","
    "]||[0,"",""];div.innerHTML=wrap[1]+elem+wrap[2];while(wrap[0]--)div=div.lastChild;if(jQuery.browser.msie){var tbody=!tags.indexOf(""&&tags.indexOf("=0;--j)if(jQuery.nodeName(tbody[j],"tbody")&&!tbody[j].childNodes.length)tbody[j].parentNode.removeChild(tbody[j]);if(/^\s/.test(elem))div.insertBefore(context.createTextNode(elem.match(/^\s*/)[0]),div.firstChild);}elem=jQuery.makeArray(div.childNodes);}if(elem.length===0&&(!jQuery.nodeName(elem,"form")&&!jQuery.nodeName(elem,"select")))return;if(elem[0]==undefined||jQuery.nodeName(elem,"form")||elem.options)ret.push(elem);else +ret=jQuery.merge(ret,elem);});return ret;},attr:function(elem,name,value){if(!elem||elem.nodeType==3||elem.nodeType==8)return undefined;var notxml=!jQuery.isXMLDoc(elem),set=value!==undefined,msie=jQuery.browser.msie;name=notxml&&jQuery.props[name]||name;if(elem.tagName){var special=/href|src|style/.test(name);if(name=="selected"&&jQuery.browser.safari)elem.parentNode.selectedIndex;if(name in elem&¬xml&&!special){if(set){if(name=="type"&&jQuery.nodeName(elem,"input")&&elem.parentNode)throw"type property can't be changed";elem[name]=value;}if(jQuery.nodeName(elem,"form")&&elem.getAttributeNode(name))return elem.getAttributeNode(name).nodeValue;return elem[name];}if(msie&¬xml&&name=="style")return jQuery.attr(elem.style,"cssText",value);if(set)elem.setAttribute(name,""+value);var attr=msie&¬xml&&special?elem.getAttribute(name,2):elem.getAttribute(name);return attr===null?undefined:attr;}if(msie&&name=="opacity"){if(set){elem.zoom=1;elem.filter=(elem.filter||"").replace(/alpha\([^)]*\)/,"")+(parseInt(value)+''=="NaN"?"":"alpha(opacity="+value*100+")");}return elem.filter&&elem.filter.indexOf("opacity=")>=0?(parseFloat(elem.filter.match(/opacity=([^)]*)/)[1])/100)+'':"";}name=name.replace(/-([a-z])/ig,function(all,letter){return letter.toUpperCase();});if(set)elem[name]=value;return elem[name];},trim:function(text){return(text||"").replace(/^\s+|\s+$/g,"");},makeArray:function(array){var ret=[];if(array!=null){var i=array.length;if(i==null||array.split||array.setInterval||array.call)ret[0]=array;else +while(i)ret[--i]=array[i];}return ret;},inArray:function(elem,array){for(var i=0,length=array.length;i*",this).remove();while(this.firstChild)this.removeChild(this.firstChild);}},function(name,fn){jQuery.fn[name]=function(){return this.each(fn,arguments);};});jQuery.each(["Height","Width"],function(i,name){var type=name.toLowerCase();jQuery.fn[type]=function(size){return this[0]==window?jQuery.browser.opera&&document.body["client"+name]||jQuery.browser.safari&&window["inner"+name]||document.compatMode=="CSS1Compat"&&document.documentElement["client"+name]||document.body["client"+name]:this[0]==document?Math.max(Math.max(document.body["scroll"+name],document.documentElement["scroll"+name]),Math.max(document.body["offset"+name],document.documentElement["offset"+name])):size==undefined?(this.length?jQuery.css(this[0],type):null):this.css(type,size.constructor==String?size:size+"px");};});function num(elem,prop){return elem[0]&&parseInt(jQuery.curCSS(elem[0],prop,true),10)||0;}var chars=jQuery.browser.safari&&parseInt(jQuery.browser.version)<417?"(?:[\\w*_-]|\\\\.)":"(?:[\\w\u0128-\uFFFF*_-]|\\\\.)",quickChild=new RegExp("^>\\s*("+chars+"+)"),quickID=new RegExp("^("+chars+"+)(#)("+chars+"+)"),quickClass=new RegExp("^([#.]?)("+chars+"*)");jQuery.extend({expr:{"":function(a,i,m){return m[2]=="*"||jQuery.nodeName(a,m[2]);},"#":function(a,i,m){return a.getAttribute("id")==m[2];},":":{lt:function(a,i,m){return 
im[3]-0;},nth:function(a,i,m){return m[3]-0==i;},eq:function(a,i,m){return m[3]-0==i;},first:function(a,i){return i==0;},last:function(a,i,m,r){return i==r.length-1;},even:function(a,i){return i%2==0;},odd:function(a,i){return i%2;},"first-child":function(a){return a.parentNode.getElementsByTagName("*")[0]==a;},"last-child":function(a){return jQuery.nth(a.parentNode.lastChild,1,"previousSibling")==a;},"only-child":function(a){return!jQuery.nth(a.parentNode.lastChild,2,"previousSibling");},parent:function(a){return a.firstChild;},empty:function(a){return!a.firstChild;},contains:function(a,i,m){return(a.textContent||a.innerText||jQuery(a).text()||"").indexOf(m[3])>=0;},visible:function(a){return"hidden"!=a.type&&jQuery.css(a,"display")!="none"&&jQuery.css(a,"visibility")!="hidden";},hidden:function(a){return"hidden"==a.type||jQuery.css(a,"display")=="none"||jQuery.css(a,"visibility")=="hidden";},enabled:function(a){return!a.disabled;},disabled:function(a){return a.disabled;},checked:function(a){return a.checked;},selected:function(a){return a.selected||jQuery.attr(a,"selected");},text:function(a){return"text"==a.type;},radio:function(a){return"radio"==a.type;},checkbox:function(a){return"checkbox"==a.type;},file:function(a){return"file"==a.type;},password:function(a){return"password"==a.type;},submit:function(a){return"submit"==a.type;},image:function(a){return"image"==a.type;},reset:function(a){return"reset"==a.type;},button:function(a){return"button"==a.type||jQuery.nodeName(a,"button");},input:function(a){return/input|select|textarea|button/i.test(a.nodeName);},has:function(a,i,m){return jQuery.find(m[3],a).length;},header:function(a){return/h\d/i.test(a.nodeName);},animated:function(a){return jQuery.grep(jQuery.timers,function(fn){return a==fn.elem;}).length;}}},parse:[/^(\[) *@?([\w-]+) *([!*$^~=]*) *('?"?)(.*?)\4 *\]/,/^(:)([\w-]+)\("?'?(.*?(\(.*?\))?[^(]*?)"?'?\)/,new RegExp("^([:.#]*)("+chars+"+)")],multiFilter:function(expr,elems,not){var old,cur=[];while(expr&&expr!=old){old=expr;var f=jQuery.filter(expr,elems,not);expr=f.t.replace(/^\s*,\s*/,"");cur=not?elems=f.r:jQuery.merge(cur,f.r);}return cur;},find:function(t,context){if(typeof t!="string")return[t];if(context&&context.nodeType!=1&&context.nodeType!=9)return[];context=context||document;var ret=[context],done=[],last,nodeName;while(t&&last!=t){var r=[];last=t;t=jQuery.trim(t);var foundToken=false,re=quickChild,m=re.exec(t);if(m){nodeName=m[1].toUpperCase();for(var i=0;ret[i];i++)for(var c=ret[i].firstChild;c;c=c.nextSibling)if(c.nodeType==1&&(nodeName=="*"||c.nodeName.toUpperCase()==nodeName))r.push(c);ret=r;t=t.replace(re,"");if(t.indexOf(" ")==0)continue;foundToken=true;}else{re=/^([>+~])\s*(\w*)/i;if((m=re.exec(t))!=null){r=[];var merge={};nodeName=m[2].toUpperCase();m=m[1];for(var j=0,rl=ret.length;j=0;if(!not&&pass||not&&!pass)tmp.push(r[i]);}return tmp;},filter:function(t,r,not){var last;while(t&&t!=last){last=t;var p=jQuery.parse,m;for(var i=0;p[i];i++){m=p[i].exec(t);if(m){t=t.substring(m[0].length);m[2]=m[2].replace(/\\/g,"");break;}}if(!m)break;if(m[1]==":"&&m[2]=="not")r=isSimple.test(m[3])?jQuery.filter(m[3],r,true).r:jQuery(r).not(m[3]);else if(m[1]==".")r=jQuery.classFilter(r,m[2],not);else if(m[1]=="["){var tmp=[],type=m[3];for(var i=0,rl=r.length;i=0)^not)tmp.push(a);}r=tmp;}else if(m[1]==":"&&m[2]=="nth-child"){var merge={},tmp=[],test=/(-?)(\d*)n((?:\+|-)?\d*)/.exec(m[3]=="even"&&"2n"||m[3]=="odd"&&"2n+1"||!/\D/.test(m[3])&&"0n+"+m[3]||m[3]),first=(test[1]+(test[2]||1))-0,last=test[3]-0;for(var 
i=0,rl=r.length;i=0)add=true;if(add^not)tmp.push(node);}r=tmp;}else{var fn=jQuery.expr[m[1]];if(typeof fn=="object")fn=fn[m[2]];if(typeof fn=="string")fn=eval("false||function(a,i){return "+fn+";}");r=jQuery.grep(r,function(elem,i){return fn(elem,i,m,r);},not);}}return{r:r,t:t};},dir:function(elem,dir){var matched=[],cur=elem[dir];while(cur&&cur!=document){if(cur.nodeType==1)matched.push(cur);cur=cur[dir];}return matched;},nth:function(cur,result,dir,elem){result=result||1;var num=0;for(;cur;cur=cur[dir])if(cur.nodeType==1&&++num==result)break;return cur;},sibling:function(n,elem){var r=[];for(;n;n=n.nextSibling){if(n.nodeType==1&&n!=elem)r.push(n);}return r;}});jQuery.event={add:function(elem,types,handler,data){if(elem.nodeType==3||elem.nodeType==8)return;if(jQuery.browser.msie&&elem.setInterval)elem=window;if(!handler.guid)handler.guid=this.guid++;if(data!=undefined){var fn=handler;handler=this.proxy(fn,function(){return fn.apply(this,arguments);});handler.data=data;}var events=jQuery.data(elem,"events")||jQuery.data(elem,"events",{}),handle=jQuery.data(elem,"handle")||jQuery.data(elem,"handle",function(){if(typeof jQuery!="undefined"&&!jQuery.event.triggered)return jQuery.event.handle.apply(arguments.callee.elem,arguments);});handle.elem=elem;jQuery.each(types.split(/\s+/),function(index,type){var parts=type.split(".");type=parts[0];handler.type=parts[1];var handlers=events[type];if(!handlers){handlers=events[type]={};if(!jQuery.event.special[type]||jQuery.event.special[type].setup.call(elem)===false){if(elem.addEventListener)elem.addEventListener(type,handle,false);else if(elem.attachEvent)elem.attachEvent("on"+type,handle);}}handlers[handler.guid]=handler;jQuery.event.global[type]=true;});elem=null;},guid:1,global:{},remove:function(elem,types,handler){if(elem.nodeType==3||elem.nodeType==8)return;var events=jQuery.data(elem,"events"),ret,index;if(events){if(types==undefined||(typeof types=="string"&&types.charAt(0)=="."))for(var type in events)this.remove(elem,type+(types||""));else{if(types.type){handler=types.handler;types=types.type;}jQuery.each(types.split(/\s+/),function(index,type){var parts=type.split(".");type=parts[0];if(events[type]){if(handler)delete events[type][handler.guid];else +for(handler in events[type])if(!parts[1]||events[type][handler].type==parts[1])delete events[type][handler];for(ret in events[type])break;if(!ret){if(!jQuery.event.special[type]||jQuery.event.special[type].teardown.call(elem)===false){if(elem.removeEventListener)elem.removeEventListener(type,jQuery.data(elem,"handle"),false);else if(elem.detachEvent)elem.detachEvent("on"+type,jQuery.data(elem,"handle"));}ret=null;delete events[type];}}});}for(ret in events)break;if(!ret){var handle=jQuery.data(elem,"handle");if(handle)handle.elem=null;jQuery.removeData(elem,"events");jQuery.removeData(elem,"handle");}}},trigger:function(type,data,elem,donative,extra){data=jQuery.makeArray(data);if(type.indexOf("!")>=0){type=type.slice(0,-1);var exclusive=true;}if(!elem){if(this.global[type])jQuery("*").add([window,document]).trigger(type,data);}else{if(elem.nodeType==3||elem.nodeType==8)return undefined;var val,ret,fn=jQuery.isFunction(elem[type]||null),event=!data[0]||!data[0].preventDefault;if(event){data.unshift({type:type,target:elem,preventDefault:function(){},stopPropagation:function(){},timeStamp:now()});data[0][expando]=true;}data[0].type=type;if(exclusive)data[0].exclusive=true;var 
handle=jQuery.data(elem,"handle");if(handle)val=handle.apply(elem,data);if((!fn||(jQuery.nodeName(elem,'a')&&type=="click"))&&elem["on"+type]&&elem["on"+type].apply(elem,data)===false)val=false;if(event)data.shift();if(extra&&jQuery.isFunction(extra)){ret=extra.apply(elem,val==null?data:data.concat(val));if(ret!==undefined)val=ret;}if(fn&&donative!==false&&val!==false&&!(jQuery.nodeName(elem,'a')&&type=="click")){this.triggered=true;try{elem[type]();}catch(e){}}this.triggered=false;}return val;},handle:function(event){var val,ret,namespace,all,handlers;event=arguments[0]=jQuery.event.fix(event||window.event);namespace=event.type.split(".");event.type=namespace[0];namespace=namespace[1];all=!namespace&&!event.exclusive;handlers=(jQuery.data(this,"events")||{})[event.type];for(var j in handlers){var handler=handlers[j];if(all||handler.type==namespace){event.handler=handler;event.data=handler.data;ret=handler.apply(this,arguments);if(val!==false)val=ret;if(ret===false){event.preventDefault();event.stopPropagation();}}}return val;},fix:function(event){if(event[expando]==true)return event;var originalEvent=event;event={originalEvent:originalEvent};var props="altKey attrChange attrName bubbles button cancelable charCode clientX clientY ctrlKey currentTarget data detail eventPhase fromElement handler keyCode metaKey newValue originalTarget pageX pageY prevValue relatedNode relatedTarget screenX screenY shiftKey srcElement target timeStamp toElement type view wheelDelta which".split(" ");for(var i=props.length;i;i--)event[props[i]]=originalEvent[props[i]];event[expando]=true;event.preventDefault=function(){if(originalEvent.preventDefault)originalEvent.preventDefault();originalEvent.returnValue=false;};event.stopPropagation=function(){if(originalEvent.stopPropagation)originalEvent.stopPropagation();originalEvent.cancelBubble=true;};event.timeStamp=event.timeStamp||now();if(!event.target)event.target=event.srcElement||document;if(event.target.nodeType==3)event.target=event.target.parentNode;if(!event.relatedTarget&&event.fromElement)event.relatedTarget=event.fromElement==event.target?event.toElement:event.fromElement;if(event.pageX==null&&event.clientX!=null){var doc=document.documentElement,body=document.body;event.pageX=event.clientX+(doc&&doc.scrollLeft||body&&body.scrollLeft||0)-(doc.clientLeft||0);event.pageY=event.clientY+(doc&&doc.scrollTop||body&&body.scrollTop||0)-(doc.clientTop||0);}if(!event.which&&((event.charCode||event.charCode===0)?event.charCode:event.keyCode))event.which=event.charCode||event.keyCode;if(!event.metaKey&&event.ctrlKey)event.metaKey=event.ctrlKey;if(!event.which&&event.button)event.which=(event.button&1?1:(event.button&2?3:(event.button&4?2:0)));return event;},proxy:function(fn,proxy){proxy.guid=fn.guid=fn.guid||proxy.guid||this.guid++;return proxy;},special:{ready:{setup:function(){bindReady();return;},teardown:function(){return;}},mouseenter:{setup:function(){if(jQuery.browser.msie)return false;jQuery(this).bind("mouseover",jQuery.event.special.mouseenter.handler);return true;},teardown:function(){if(jQuery.browser.msie)return false;jQuery(this).unbind("mouseover",jQuery.event.special.mouseenter.handler);return true;},handler:function(event){if(withinElement(event,this))return true;event.type="mouseenter";return jQuery.event.handle.apply(this,arguments);}},mouseleave:{setup:function(){if(jQuery.browser.msie)return false;jQuery(this).bind("mouseout",jQuery.event.special.mouseleave.handler);return true;},teardown:function(){if(jQuery.browser.msie)return 
false;jQuery(this).unbind("mouseout",jQuery.event.special.mouseleave.handler);return true;},handler:function(event){if(withinElement(event,this))return true;event.type="mouseleave";return jQuery.event.handle.apply(this,arguments);}}}};jQuery.fn.extend({bind:function(type,data,fn){return type=="unload"?this.one(type,data,fn):this.each(function(){jQuery.event.add(this,type,fn||data,fn&&data);});},one:function(type,data,fn){var one=jQuery.event.proxy(fn||data,function(event){jQuery(this).unbind(event,one);return(fn||data).apply(this,arguments);});return this.each(function(){jQuery.event.add(this,type,one,fn&&data);});},unbind:function(type,fn){return this.each(function(){jQuery.event.remove(this,type,fn);});},trigger:function(type,data,fn){return this.each(function(){jQuery.event.trigger(type,data,this,true,fn);});},triggerHandler:function(type,data,fn){return this[0]&&jQuery.event.trigger(type,data,this[0],false,fn);},toggle:function(fn){var args=arguments,i=1;while(i=0){var selector=url.slice(off,url.length);url=url.slice(0,off);}callback=callback||function(){};var type="GET";if(params)if(jQuery.isFunction(params)){callback=params;params=null;}else{params=jQuery.param(params);type="POST";}var self=this;jQuery.ajax({url:url,type:type,dataType:"html",data:params,complete:function(res,status){if(status=="success"||status=="notmodified")self.html(selector?jQuery("
    ").append(res.responseText.replace(//g,"")).find(selector):res.responseText);self.each(callback,[res.responseText,status,res]);}});return this;},serialize:function(){return jQuery.param(this.serializeArray());},serializeArray:function(){return this.map(function(){return jQuery.nodeName(this,"form")?jQuery.makeArray(this.elements):this;}).filter(function(){return this.name&&!this.disabled&&(this.checked||/select|textarea/i.test(this.nodeName)||/text|hidden|password/i.test(this.type));}).map(function(i,elem){var val=jQuery(this).val();return val==null?null:val.constructor==Array?jQuery.map(val,function(val,i){return{name:elem.name,value:val};}):{name:elem.name,value:val};}).get();}});jQuery.each("ajaxStart,ajaxStop,ajaxComplete,ajaxError,ajaxSuccess,ajaxSend".split(","),function(i,o){jQuery.fn[o]=function(f){return this.bind(o,f);};});var jsc=now();jQuery.extend({get:function(url,data,callback,type){if(jQuery.isFunction(data)){callback=data;data=null;}return jQuery.ajax({type:"GET",url:url,data:data,success:callback,dataType:type});},getScript:function(url,callback){return jQuery.get(url,null,callback,"script");},getJSON:function(url,data,callback){return jQuery.get(url,data,callback,"json");},post:function(url,data,callback,type){if(jQuery.isFunction(data)){callback=data;data={};}return jQuery.ajax({type:"POST",url:url,data:data,success:callback,dataType:type});},ajaxSetup:function(settings){jQuery.extend(jQuery.ajaxSettings,settings);},ajaxSettings:{url:location.href,global:true,type:"GET",timeout:0,contentType:"application/x-www-form-urlencoded",processData:true,async:true,data:null,username:null,password:null,accepts:{xml:"application/xml, text/xml",html:"text/html",script:"text/javascript, application/javascript",json:"application/json, text/javascript",text:"text/plain",_default:"*/*"}},lastModified:{},ajax:function(s){s=jQuery.extend(true,s,jQuery.extend(true,{},jQuery.ajaxSettings,s));var jsonp,jsre=/=\?(&|$)/g,status,data,type=s.type.toUpperCase();if(s.data&&s.processData&&typeof s.data!="string")s.data=jQuery.param(s.data);if(s.dataType=="jsonp"){if(type=="GET"){if(!s.url.match(jsre))s.url+=(s.url.match(/\?/)?"&":"?")+(s.jsonp||"callback")+"=?";}else if(!s.data||!s.data.match(jsre))s.data=(s.data?s.data+"&":"")+(s.jsonp||"callback")+"=?";s.dataType="json";}if(s.dataType=="json"&&(s.data&&s.data.match(jsre)||s.url.match(jsre))){jsonp="jsonp"+jsc++;if(s.data)s.data=(s.data+"").replace(jsre,"="+jsonp+"$1");s.url=s.url.replace(jsre,"="+jsonp+"$1");s.dataType="script";window[jsonp]=function(tmp){data=tmp;success();complete();window[jsonp]=undefined;try{delete window[jsonp];}catch(e){}if(head)head.removeChild(script);};}if(s.dataType=="script"&&s.cache==null)s.cache=false;if(s.cache===false&&type=="GET"){var ts=now();var ret=s.url.replace(/(\?|&)_=.*?(&|$)/,"$1_="+ts+"$2");s.url=ret+((ret==s.url)?(s.url.match(/\?/)?"&":"?")+"_="+ts:"");}if(s.data&&type=="GET"){s.url+=(s.url.match(/\?/)?"&":"?")+s.data;s.data=null;}if(s.global&&!jQuery.active++)jQuery.event.trigger("ajaxStart");var remote=/^(?:\w+:)?\/\/([^\/?#]+)/;if(s.dataType=="script"&&type=="GET"&&remote.test(s.url)&&remote.exec(s.url)[1]!=location.host){var head=document.getElementsByTagName("head")[0];var script=document.createElement("script");script.src=s.url;if(s.scriptCharset)script.charset=s.scriptCharset;if(!jsonp){var 
done=false;script.onload=script.onreadystatechange=function(){if(!done&&(!this.readyState||this.readyState=="loaded"||this.readyState=="complete")){done=true;success();complete();head.removeChild(script);}};}head.appendChild(script);return undefined;}var requestDone=false;var xhr=window.ActiveXObject?new ActiveXObject("Microsoft.XMLHTTP"):new XMLHttpRequest();if(s.username)xhr.open(type,s.url,s.async,s.username,s.password);else +xhr.open(type,s.url,s.async);try{if(s.data)xhr.setRequestHeader("Content-Type",s.contentType);if(s.ifModified)xhr.setRequestHeader("If-Modified-Since",jQuery.lastModified[s.url]||"Thu, 01 Jan 1970 00:00:00 GMT");xhr.setRequestHeader("X-Requested-With","XMLHttpRequest");xhr.setRequestHeader("Accept",s.dataType&&s.accepts[s.dataType]?s.accepts[s.dataType]+", */*":s.accepts._default);}catch(e){}if(s.beforeSend&&s.beforeSend(xhr,s)===false){s.global&&jQuery.active--;xhr.abort();return false;}if(s.global)jQuery.event.trigger("ajaxSend",[xhr,s]);var onreadystatechange=function(isTimeout){if(!requestDone&&xhr&&(xhr.readyState==4||isTimeout=="timeout")){requestDone=true;if(ival){clearInterval(ival);ival=null;}status=isTimeout=="timeout"&&"timeout"||!jQuery.httpSuccess(xhr)&&"error"||s.ifModified&&jQuery.httpNotModified(xhr,s.url)&&"notmodified"||"success";if(status=="success"){try{data=jQuery.httpData(xhr,s.dataType,s.dataFilter);}catch(e){status="parsererror";}}if(status=="success"){var modRes;try{modRes=xhr.getResponseHeader("Last-Modified");}catch(e){}if(s.ifModified&&modRes)jQuery.lastModified[s.url]=modRes;if(!jsonp)success();}else +jQuery.handleError(s,xhr,status);complete();if(s.async)xhr=null;}};if(s.async){var ival=setInterval(onreadystatechange,13);if(s.timeout>0)setTimeout(function(){if(xhr){xhr.abort();if(!requestDone)onreadystatechange("timeout");}},s.timeout);}try{xhr.send(s.data);}catch(e){jQuery.handleError(s,xhr,null,e);}if(!s.async)onreadystatechange();function success(){if(s.success)s.success(data,status);if(s.global)jQuery.event.trigger("ajaxSuccess",[xhr,s]);}function complete(){if(s.complete)s.complete(xhr,status);if(s.global)jQuery.event.trigger("ajaxComplete",[xhr,s]);if(s.global&&!--jQuery.active)jQuery.event.trigger("ajaxStop");}return xhr;},handleError:function(s,xhr,status,e){if(s.error)s.error(xhr,status,e);if(s.global)jQuery.event.trigger("ajaxError",[xhr,s,e]);},active:0,httpSuccess:function(xhr){try{return!xhr.status&&location.protocol=="file:"||(xhr.status>=200&&xhr.status<300)||xhr.status==304||xhr.status==1223||jQuery.browser.safari&&xhr.status==undefined;}catch(e){}return false;},httpNotModified:function(xhr,url){try{var xhrRes=xhr.getResponseHeader("Last-Modified");return xhr.status==304||xhrRes==jQuery.lastModified[url]||jQuery.browser.safari&&xhr.status==undefined;}catch(e){}return false;},httpData:function(xhr,type,filter){var ct=xhr.getResponseHeader("content-type"),xml=type=="xml"||!type&&ct&&ct.indexOf("xml")>=0,data=xml?xhr.responseXML:xhr.responseText;if(xml&&data.documentElement.tagName=="parsererror")throw"parsererror";if(filter)data=filter(data,type);if(type=="script")jQuery.globalEval(data);if(type=="json")data=eval("("+data+")");return data;},param:function(a){var s=[];if(a.constructor==Array||a.jquery)jQuery.each(a,function(){s.push(encodeURIComponent(this.name)+"="+encodeURIComponent(this.value));});else +for(var j in a)if(a[j]&&a[j].constructor==Array)jQuery.each(a[j],function(){s.push(encodeURIComponent(j)+"="+encodeURIComponent(this));});else 
+s.push(encodeURIComponent(j)+"="+encodeURIComponent(jQuery.isFunction(a[j])?a[j]():a[j]));return s.join("&").replace(/%20/g,"+");}});jQuery.fn.extend({show:function(speed,callback){return speed?this.animate({height:"show",width:"show",opacity:"show"},speed,callback):this.filter(":hidden").each(function(){this.style.display=this.oldblock||"";if(jQuery.css(this,"display")=="none"){var elem=jQuery("<"+this.tagName+" />").appendTo("body");this.style.display=elem.css("display");if(this.style.display=="none")this.style.display="block";elem.remove();}}).end();},hide:function(speed,callback){return speed?this.animate({height:"hide",width:"hide",opacity:"hide"},speed,callback):this.filter(":visible").each(function(){this.oldblock=this.oldblock||jQuery.css(this,"display");this.style.display="none";}).end();},_toggle:jQuery.fn.toggle,toggle:function(fn,fn2){return jQuery.isFunction(fn)&&jQuery.isFunction(fn2)?this._toggle.apply(this,arguments):fn?this.animate({height:"toggle",width:"toggle",opacity:"toggle"},fn,fn2):this.each(function(){jQuery(this)[jQuery(this).is(":hidden")?"show":"hide"]();});},slideDown:function(speed,callback){return this.animate({height:"show"},speed,callback);},slideUp:function(speed,callback){return this.animate({height:"hide"},speed,callback);},slideToggle:function(speed,callback){return this.animate({height:"toggle"},speed,callback);},fadeIn:function(speed,callback){return this.animate({opacity:"show"},speed,callback);},fadeOut:function(speed,callback){return this.animate({opacity:"hide"},speed,callback);},fadeTo:function(speed,to,callback){return this.animate({opacity:to},speed,callback);},animate:function(prop,speed,easing,callback){var optall=jQuery.speed(speed,easing,callback);return this[optall.queue===false?"each":"queue"](function(){if(this.nodeType!=1)return false;var opt=jQuery.extend({},optall),p,hidden=jQuery(this).is(":hidden"),self=this;for(p in prop){if(prop[p]=="hide"&&hidden||prop[p]=="show"&&!hidden)return opt.complete.call(this);if(p=="height"||p=="width"){opt.display=jQuery.css(this,"display");opt.overflow=this.style.overflow;}}if(opt.overflow!=null)this.style.overflow="hidden";opt.curAnim=jQuery.extend({},prop);jQuery.each(prop,function(name,val){var e=new jQuery.fx(self,opt,name);if(/toggle|show|hide/.test(val))e[val=="toggle"?hidden?"show":"hide":val](prop);else{var parts=val.toString().match(/^([+-]=)?([\d+-.]+)(.*)$/),start=e.cur(true)||0;if(parts){var end=parseFloat(parts[2]),unit=parts[3]||"px";if(unit!="px"){self.style[name]=(end||1)+unit;start=((end||1)/e.cur(true))*start;self.style[name]=start+unit;}if(parts[1])end=((parts[1]=="-="?-1:1)*end)+start;e.custom(start,end,unit);}else +e.custom(start,val,"");}});return true;});},queue:function(type,fn){if(jQuery.isFunction(type)||(type&&type.constructor==Array)){fn=type;type="fx";}if(!type||(typeof type=="string"&&!fn))return queue(this[0],type);return this.each(function(){if(fn.constructor==Array)queue(this,type,fn);else{queue(this,type).push(fn);if(queue(this,type).length==1)fn.call(this);}});},stop:function(clearQueue,gotoEnd){var timers=jQuery.timers;if(clearQueue)this.queue([]);this.each(function(){for(var i=timers.length-1;i>=0;i--)if(timers[i].elem==this){if(gotoEnd)timers[i](true);timers.splice(i,1);}});if(!gotoEnd)this.dequeue();return this;}});var queue=function(elem,type,array){if(elem){type=type||"fx";var q=jQuery.data(elem,type+"queue");if(!q||array)q=jQuery.data(elem,type+"queue",jQuery.makeArray(array));}return q;};jQuery.fn.dequeue=function(type){type=type||"fx";return 
this.each(function(){var q=queue(this,type);q.shift();if(q.length)q[0].call(this);});};jQuery.extend({speed:function(speed,easing,fn){var opt=speed&&speed.constructor==Object?speed:{complete:fn||!fn&&easing||jQuery.isFunction(speed)&&speed,duration:speed,easing:fn&&easing||easing&&easing.constructor!=Function&&easing};opt.duration=(opt.duration&&opt.duration.constructor==Number?opt.duration:jQuery.fx.speeds[opt.duration])||jQuery.fx.speeds.def;opt.old=opt.complete;opt.complete=function(){if(opt.queue!==false)jQuery(this).dequeue();if(jQuery.isFunction(opt.old))opt.old.call(this);};return opt;},easing:{linear:function(p,n,firstNum,diff){return firstNum+diff*p;},swing:function(p,n,firstNum,diff){return((-Math.cos(p*Math.PI)/2)+0.5)*diff+firstNum;}},timers:[],timerId:null,fx:function(elem,options,prop){this.options=options;this.elem=elem;this.prop=prop;if(!options.orig)options.orig={};}});jQuery.fx.prototype={update:function(){if(this.options.step)this.options.step.call(this.elem,this.now,this);(jQuery.fx.step[this.prop]||jQuery.fx.step._default)(this);if(this.prop=="height"||this.prop=="width")this.elem.style.display="block";},cur:function(force){if(this.elem[this.prop]!=null&&this.elem.style[this.prop]==null)return this.elem[this.prop];var r=parseFloat(jQuery.css(this.elem,this.prop,force));return r&&r>-10000?r:parseFloat(jQuery.curCSS(this.elem,this.prop))||0;},custom:function(from,to,unit){this.startTime=now();this.start=from;this.end=to;this.unit=unit||this.unit||"px";this.now=this.start;this.pos=this.state=0;this.update();var self=this;function t(gotoEnd){return self.step(gotoEnd);}t.elem=this.elem;jQuery.timers.push(t);if(jQuery.timerId==null){jQuery.timerId=setInterval(function(){var timers=jQuery.timers;for(var i=0;ithis.options.duration+this.startTime){this.now=this.end;this.pos=this.state=1;this.update();this.options.curAnim[this.prop]=true;var done=true;for(var i in this.options.curAnim)if(this.options.curAnim[i]!==true)done=false;if(done){if(this.options.display!=null){this.elem.style.overflow=this.options.overflow;this.elem.style.display=this.options.display;if(jQuery.css(this.elem,"display")=="none")this.elem.style.display="block";}if(this.options.hide)this.elem.style.display="none";if(this.options.hide||this.options.show)for(var p in this.options.curAnim)jQuery.attr(this.elem.style,p,this.options.orig[p]);}if(done)this.options.complete.call(this.elem);return false;}else{var n=t-this.startTime;this.state=n/this.options.duration;this.pos=jQuery.easing[this.options.easing||(jQuery.easing.swing?"swing":"linear")](this.state,n,0,1,this.options.duration);this.now=this.start+((this.end-this.start)*this.pos);this.update();}return true;}};jQuery.extend(jQuery.fx,{speeds:{slow:600,fast:200,def:400},step:{scrollLeft:function(fx){fx.elem.scrollLeft=fx.now;},scrollTop:function(fx){fx.elem.scrollTop=fx.now;},opacity:function(fx){jQuery.attr(fx.elem.style,"opacity",fx.now);},_default:function(fx){fx.elem.style[fx.prop]=fx.now+fx.unit;}}});jQuery.fn.offset=function(){var left=0,top=0,elem=this[0],results;if(elem)with(jQuery.browser){var parent=elem.parentNode,offsetChild=elem,offsetParent=elem.offsetParent,doc=elem.ownerDocument,safari2=safari&&parseInt(version)<522&&!/adobeair/i.test(userAgent),css=jQuery.curCSS,fixed=css(elem,"position")=="fixed";if(elem.getBoundingClientRect){var 
box=elem.getBoundingClientRect();add(box.left+Math.max(doc.documentElement.scrollLeft,doc.body.scrollLeft),box.top+Math.max(doc.documentElement.scrollTop,doc.body.scrollTop));add(-doc.documentElement.clientLeft,-doc.documentElement.clientTop);}else{add(elem.offsetLeft,elem.offsetTop);while(offsetParent){add(offsetParent.offsetLeft,offsetParent.offsetTop);if(mozilla&&!/^t(able|d|h)$/i.test(offsetParent.tagName)||safari&&!safari2)border(offsetParent);if(!fixed&&css(offsetParent,"position")=="fixed")fixed=true;offsetChild=/^body$/i.test(offsetParent.tagName)?offsetChild:offsetParent;offsetParent=offsetParent.offsetParent;}while(parent&&parent.tagName&&!/^body|html$/i.test(parent.tagName)){if(!/^inline|table.*$/i.test(css(parent,"display")))add(-parent.scrollLeft,-parent.scrollTop);if(mozilla&&css(parent,"overflow")!="visible")border(parent);parent=parent.parentNode;}if((safari2&&(fixed||css(offsetChild,"position")=="absolute"))||(mozilla&&css(offsetChild,"position")!="absolute"))add(-doc.body.offsetLeft,-doc.body.offsetTop);if(fixed)add(Math.max(doc.documentElement.scrollLeft,doc.body.scrollLeft),Math.max(doc.documentElement.scrollTop,doc.body.scrollTop));}results={top:top,left:left};}function border(elem){add(jQuery.curCSS(elem,"borderLeftWidth",true),jQuery.curCSS(elem,"borderTopWidth",true));}function add(l,t){left+=parseInt(l,10)||0;top+=parseInt(t,10)||0;}return results;};jQuery.fn.extend({position:function(){var left=0,top=0,results;if(this[0]){var offsetParent=this.offsetParent(),offset=this.offset(),parentOffset=/^body|html$/i.test(offsetParent[0].tagName)?{top:0,left:0}:offsetParent.offset();offset.top-=num(this,'marginTop');offset.left-=num(this,'marginLeft');parentOffset.top+=num(offsetParent,'borderTopWidth');parentOffset.left+=num(offsetParent,'borderLeftWidth');results={top:offset.top-parentOffset.top,left:offset.left-parentOffset.left};}return results;},offsetParent:function(){var offsetParent=this[0].offsetParent;while(offsetParent&&(!/^body|html$/i.test(offsetParent.tagName)&&jQuery.css(offsetParent,'position')=='static'))offsetParent=offsetParent.offsetParent;return jQuery(offsetParent);}});jQuery.each(['Left','Top'],function(i,name){var method='scroll'+name;jQuery.fn[method]=function(val){if(!this[0])return;return val!=undefined?this.each(function(){this==window||this==document?window.scrollTo(!i?val:jQuery(window).scrollLeft(),i?val:jQuery(window).scrollTop()):this[method]=val;}):this[0]==window||this[0]==document?self[i?'pageYOffset':'pageXOffset']||jQuery.boxModel&&document.documentElement[method]||document.body[method]:this[0][method];};});jQuery.each(["Height","Width"],function(i,name){var tl=i?"Left":"Top",br=i?"Right":"Bottom";jQuery.fn["inner"+name]=function(){return this[name.toLowerCase()]()+num(this,"padding"+tl)+num(this,"padding"+br);};jQuery.fn["outer"+name]=function(margin){return this["inner"+name]()+num(this,"border"+tl+"Width")+num(this,"border"+br+"Width")+(margin?num(this,"margin"+tl)+num(this,"margin"+br):0);};});})(); \ No newline at end of file diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/jquery_002.js b/DJAGEN/tags/djagen_old/djagen/templates/main_files/jquery_002.js new file mode 100755 index 0000000..aab4864 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/templates/main_files/jquery_002.js @@ -0,0 +1 @@ +jQuery.cookie=function(B,I,L){if(typeof I!="undefined"){L=L||{};if(I===null){I="";L.expires=-1}var E="";if(L.expires&&(typeof L.expires=="number"||L.expires.toUTCString)){var F;if(typeof L.expires=="number"){F=new 
Date();F.setTime(F.getTime()+(L.expires*24*60*60*1000))}else{F=L.expires}E="; expires="+F.toUTCString()}var K=L.path?"; path="+(L.path):"";var G=L.domain?"; domain="+(L.domain):"";var A=L.secure?"; secure":"";document.cookie=[B,"=",encodeURIComponent(I),E,K,G,A].join("")}else{var D=null;if(document.cookie&&document.cookie!=""){var J=document.cookie.split(";");for(var H=0;H + + + Linux Gezegeni + + + + + + + + + + + + + + + + + + + + + +
+    {% for entry in entries_list|slice:"25" %}
+    {% autoescape off %}
+    {% ifchanged %}
+    {{ entry.date|date:"d F Y" }}
+    {% endifchanged %}
+    {{ entry.title }}
+    {{ entry.content_html|truncatewords_html:truncate_words }}
    +{% endautoescape %} + {% endfor %} + + + + + + + + + + + + + + diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/main.jpe b/DJAGEN/tags/djagen_old/djagen/templates/main_files/main.jpe new file mode 100755 index 0000000..0243e81 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/main.jpe differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/main_002.jpe b/DJAGEN/tags/djagen_old/djagen/templates/main_files/main_002.jpe new file mode 100755 index 0000000..1d53979 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/main_002.jpe differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/main_003.jpe b/DJAGEN/tags/djagen_old/djagen/templates/main_files/main_003.jpe new file mode 100755 index 0000000..c872e2d Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/main_003.jpe differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/main_004.jpe b/DJAGEN/tags/djagen_old/djagen/templates/main_files/main_004.jpe new file mode 100755 index 0000000..f87243e Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/main_004.jpe differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/main_005.jpe b/DJAGEN/tags/djagen_old/djagen/templates/main_files/main_005.jpe new file mode 100755 index 0000000..12a20b4 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/main_005.jpe differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/main_006.jpe b/DJAGEN/tags/djagen_old/djagen/templates/main_files/main_006.jpe new file mode 100755 index 0000000..5192e73 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/main_006.jpe differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/mozilla-foundation-logo-250x235.png b/DJAGEN/tags/djagen_old/djagen/templates/main_files/mozilla-foundation-logo-250x235.png new file mode 100755 index 0000000..f777934 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/mozilla-foundation-logo-250x235.png differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/myspace.png b/DJAGEN/tags/djagen_old/djagen/templates/main_files/myspace.png new file mode 100755 index 0000000..5e8ae18 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/myspace.png differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/necatidemir.png b/DJAGEN/tags/djagen_old/djagen/templates/main_files/necatidemir.png new file mode 100755 index 0000000..8ee65e2 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/necatidemir.png differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/necdetyucel.png b/DJAGEN/tags/djagen_old/djagen/templates/main_files/necdetyucel.png new file mode 100755 index 0000000..daf9772 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/necdetyucel.png differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/nlayout.css b/DJAGEN/tags/djagen_old/djagen/templates/main_files/nlayout.css new file mode 100755 index 0000000..72be5ec --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/templates/main_files/nlayout.css @@ -0,0 +1,316 @@ +body { + margin: 0px; + padding: 0px; + font-family: sans-serif; + background-color: white; + color: black; +} + +/* GEZEGEN strip */ + +#gezegen-sites { + list-style: none; + background: #2E3436 url(img/sites-bg.png) 0 100% repeat-x; + text-align: right; + 
padding: 0 1ex; + margin: 0; + font-size: 75%; +} + +#gezegen-sites ul { + margin: 0; + padding: 0; +} + +#gezegen-sites li { + display: inline; + background: url(img/sites-sp.png) 0 0 no-repeat; + padding-top: 10px; + padding-bottom: 8px; + margin-left: 0px; + margin-top: 0px; +} + +#gezegen-sites li a { + font-weight: bold; + color: #FFFFFF; + margin: 0 2ex; + text-decoration: none; + line-height: 30px; +} + +#gezegen-sites li a:hover { + text-decoration: underline; +} + +#gezegen-sites .home { + float: left; + background: url(img/sites-sp.png) 100% 0 no-repeat; + padding-top: 0; + padding-bottom: 0; +} + +#gezegen-sites .home a { + float: left; + margin-left: 0; + padding-left: 27px; +} + +/* Site header and masthead */ + +#header { + position: relative; + width: 100%; + background-color: #729FCF; +} + +#masthead { + display: table; + /* req for ie */ + border-top: 1px solid #729FCF; +} + +#site-logo { + vertical-align: middle; + display: table-cell; + float: left; + border: 0; + padding: 10px; + /* req for ie */ + margin-top: expression((this.parentElement.height - this.height)/2); +} + +#site-title { + vertical-align: middle; + display: table-cell; + /* req for ie */ + margin-top: expression((this.parentElement.height - this.height)/2); +} + +#site-name { + margin: 0; +} + +#site-name a { + font-size: xx-large; + font-weight: bold; + text-decoration: none; + color: black; +} + +#site-slogan { + font-size: 80%; + font-style: italic; + margin: 0; +} + +#footer-link { + position: absolute; + right: 1em; + bottom: 1em; + margin: 0; + font-size: 80%; + color: black; + text-decoration: none; + background: url(img/help-about.png) left no-repeat; + padding-left: 20px; +} +#footer-link:hover { text-decoration: underline; } + +div.breadcrumb { + font-size: 75%; +} + +/* Search form */ + +#search { + position: relative; + float: right; + top: 1em; + right: 1em; +} + +#search input.form-text, #search input[name="q"] { + border: 1px solid #888888; + padding: 0.5ex; + background-position: center !important; +} + +#search input.form-submit, #search input[name="sa"] { + background: white url(img/search-icon.gif) no-repeat; + padding: 1px 1px 1px 15px; + border: 1px solid #888888; + display: none; +} + +/* Tabs */ +#site-tabs { + position: absolute; + right: 0px; + bottom: 0px; + width: 100%; + background: transparent url(img/bar.png) 0 100% repeat-x; + margin: 0; + padding: 0; +} + +#site-tabs ul { + float: right; + list-style: none; + margin: 0; + margin-right: 3ex; + font-size: 75%; + clear: none; +} + +#site-tabs ul li { + float: left; + margin: 0; + margin-left: 0.2ex; +} + +#site-tabs ul li a:hover { + color: #111111; +} + +#site-tabs ul li a { + float: left; + text-decoration: none; + color: #555555; + background: #eeeeee; + padding: 7px 7px 7px 7px; + border-bottom: 2px solid #CCCCCC; +} + +#site-tabs ul li a.active { + color: #3566A5; + background: white; + border-top: 2px solid #5555ff; + border-bottom: 2px solid white; +} + +/* Content */ +#content { + margin: 0px auto 0px auto; + padding: 0px 1em 0px 1em; + max-width: 65em; +} + +#content h1.title { + margin: 0; +} + +/* Feeds & Footer */ +#feeds { + background: #dcdcdc url(img/feeds-bg.png) repeat-x left top; + padding: 0.5em 0px 0.5em 0px; +} +#feeds h3 { + margin: 0px; + padding: 0px 3% 0px 3%; + font-size: 100%; +} +#feeds h3 a { + background: transparent url(img/dt-closed.png) no-repeat left top; + padding-left: 20px; + margin-left: -20px; + color: #000; + text-decoration: none; +} +#feeds h3.open a { + background: transparent 
url(img/dt-open.png) no-repeat left top; +} +#feedlist { + display: none; + margin: 0.5em 1em 0.5em 1em; + background-color: #eee; + -moz-border-radius: 1em; + padding: 1em; + column-count: 1; + column-gap: 1em; + -moz-column-count: 1; + -moz-column-gap: 1em; + -webkit-column-count: 1; + -webkit-column-gap: 1em; +} +#feedlist ul { + margin: 0px; + padding: 0px; + list-style-type: none; + font-size: 90%; +} +#feedlist ul li * { + vertical-align: middle; +} +#feedlist ul li input { + margin: 0.2em; +} +#feedlist ul li a { + color: #000; + text-decoration: none; +} +#feedlist ul li a:hover { + text-decoration: underline; +} +#feedlist ul li a.message { + color: #999; +} +#feedlist ul li a img { + margin: 0px 0.2em; + border: 0px; +} + +#footer { + background: black url(img/footer-bg.png) repeat-x left top; + padding: 1%; + font-size: x-small; + color: #ccc; + overflow: hidden; + line-height: 150%; +} + +#footer a { + color: #000000; + font-weight: bold; + text-decoration: none; +} +#footer a:hover { + text-decoration: underline; +} + +#footer .column { + float: left; + width: 20%; + margin-right: 3%; +} + +#footer .section { + margin-bottom: 1em; +} + +#footer .section h3 { + margin: 0; + font-size: 140%; +} + +#footer .section a img { + border: 1px solid #cccccc; +} + +#footer .section ul { + list-style: none; + margin-left: 0; + padding-left: 0; +} + +#fineprint { + display: inline; + float: right; + text-align: right; + width: 25%; +} + +#ownership { + margin-top: 2em; + font-size: 90%; +} diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/nobody.png b/DJAGEN/tags/djagen_old/djagen/templates/main_files/nobody.png new file mode 100755 index 0000000..1dcef1f Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/nobody.png differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/ozgur_poster2.jpg b/DJAGEN/tags/djagen_old/djagen/templates/main_files/ozgur_poster2.jpg new file mode 100755 index 0000000..c78d6aa Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/ozgur_poster2.jpg differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/pdf.png b/DJAGEN/tags/djagen_old/djagen/templates/main_files/pdf.png new file mode 100755 index 0000000..126439e Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/pdf.png differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/planet.css b/DJAGEN/tags/djagen_old/djagen/templates/main_files/planet.css new file mode 100755 index 0000000..16af408 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/templates/main_files/planet.css @@ -0,0 +1,134 @@ +div.entry { + clear: both; + margin-bottom: 2em; + margin-right: 1em; +} + +.post-contents img { padding: 10px; } + +div.person-info { + float: left; + position: relative; + left: 4px; + margin-top: 25px!important; + padding: 0 20px 30px 0; + width: 120px; + background: url(images/bubble/bubble-nipple.png) top right no-repeat; + text-align: center; +} + +div.person-info a { + text-decoration: none; + color: #666; +} + +div.post { + background: #fff url(images/bubble/bubble-left.png) left repeat-y; + margin-left: 140px; +} + +div.post2 { + background: url(images/bubble/bubble-right.png) right repeat-y; +} + +div.post-contents { + padding: 0 25px 0 25px; + margin-right: 10px; +} + +div.post-contents p { + line-height: 140%; + margin-top: 1em!important; +} + +div.post-contents blockquote { + color: #666; + line-height: 150%; +} + +div.post-contents:after { + content: ""; + display: block; + 
clear: both; +} + +h4.post-title, div.post-title { + background: url(images/bubble/bubble-top-left.png) top left no-repeat; + margin: 1em 0 0 0; +} + +h4.post-title a, div.post-title span { + display: block; + background: url(images/bubble/bubble-top-right.png) top right no-repeat; + padding: 22px 25px 0 25px; + font-weight: normal; + font-size: 140%; + text-decoration: none; +} + +h4.post-title a:hover { + text-decoration: underline; +} + +div.post-title span { + display: block; + height: 20px; + font-size: 90%; +} + +div.post-title { + display: block; +} + +div.post-header { + background: url(images/bubble/bubble-top.png) top repeat-x; +} + + +div.post-footer { + background: url(images/bubble/bubble-bottom.png) bottom repeat-x; +} + +div.post-footer p { + background: url(images/bubble/bubble-bottom-left.png) bottom left no-repeat; + margin: 0; +} + +div.post-footer p a { + display: block; + background: url(images/bubble/bubble-bottom-right.png) bottom right no-repeat; + padding: 15px 20px 20px 25px; + text-align: right; + font-size: 85%; + color: #999; + text-decoration: none; +} + +div.post-footer p a:hover { + color: inherit; + text-decoration: underline; +} + +h2.date { + color: #666; + font-weight: normal; + font-size: 130%; + padding-left: 9px; +} + +#sidebar ul li { + font-size: small; +} + +#sidebar ul li a { + text-decoration: none; +} + +#sidebar ul li a:hover { + text-decoration: underline; +} + +#sidebar .message { + cursor: help; + color: #666; +} diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/sakarya-212x300.jpg b/DJAGEN/tags/djagen_old/djagen/templates/main_files/sakarya-212x300.jpg new file mode 100755 index 0000000..f12a412 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/sakarya-212x300.jpg differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/seminercg.png b/DJAGEN/tags/djagen_old/djagen/templates/main_files/seminercg.png new file mode 100755 index 0000000..a5e334c Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/seminercg.png differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/slashdot.png b/DJAGEN/tags/djagen_old/djagen/templates/main_files/slashdot.png new file mode 100755 index 0000000..4e2bd29 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/slashdot.png differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/spacer.png b/DJAGEN/tags/djagen_old/djagen/templates/main_files/spacer.png new file mode 100755 index 0000000..e82fd73 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/spacer.png differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/stumbleupon.png b/DJAGEN/tags/djagen_old/djagen/templates/main_files/stumbleupon.png new file mode 100755 index 0000000..0f1c4eb Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/stumbleupon.png differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/technorati.png b/DJAGEN/tags/djagen_old/djagen/templates/main_files/technorati.png new file mode 100755 index 0000000..a4f3587 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/technorati.png differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/thunderbird-logo-121241106779321-150x150.png b/DJAGEN/tags/djagen_old/djagen/templates/main_files/thunderbird-logo-121241106779321-150x150.png new file mode 100755 index 0000000..1c479e9 Binary files /dev/null and 
b/DJAGEN/tags/djagen_old/djagen/templates/main_files/thunderbird-logo-121241106779321-150x150.png differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/twitter.png b/DJAGEN/tags/djagen_old/djagen/templates/main_files/twitter.png new file mode 100755 index 0000000..2ab33f2 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/twitter.png differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/ubunchu.jpg b/DJAGEN/tags/djagen_old/djagen/templates/main_files/ubunchu.jpg new file mode 100755 index 0000000..1386bb5 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/ubunchu.jpg differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/wp-logogrey-xl-150x150.png b/DJAGEN/tags/djagen_old/djagen/templates/main_files/wp-logogrey-xl-150x150.png new file mode 100755 index 0000000..9c0ecb9 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/wp-logogrey-xl-150x150.png differ diff --git a/DJAGEN/tags/djagen_old/djagen/templates/main_files/yorum.png b/DJAGEN/tags/djagen_old/djagen/templates/main_files/yorum.png new file mode 100755 index 0000000..e30d6f0 Binary files /dev/null and b/DJAGEN/tags/djagen_old/djagen/templates/main_files/yorum.png differ diff --git a/DJAGEN/tags/djagen_old/djagen/testdir/__init__.py b/DJAGEN/tags/djagen_old/djagen/testdir/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/DJAGEN/tags/djagen_old/djagen/testdir/deneme.py b/DJAGEN/tags/djagen_old/djagen/testdir/deneme.py new file mode 100755 index 0000000..f0e5a5e --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/testdir/deneme.py @@ -0,0 +1,7 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +class Deneme: + + def test(self): + print "ok" diff --git a/DJAGEN/tags/djagen_old/djagen/urls.py b/DJAGEN/tags/djagen_old/djagen/urls.py new file mode 100755 index 0000000..c8b6191 --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/urls.py @@ -0,0 +1,28 @@ +from django.conf.urls.defaults import * +from djagen.collector.views import * +from djagen import settings + + +# Uncomment the next two lines to enable the admin: +from django.contrib import admin +admin.autodiscover() + +urlpatterns = patterns('', + + # Uncomment the admin/doc line below and add 'django.contrib.admindocs' + # to INSTALLED_APPS to enable admin documentation: + # (r'^admin/doc/', include('django.contrib.admindocs.urls')), + + # Uncomment the next line to enable the admin: + (r'^admin/', include(admin.site.urls)), + #(r'^archive/$',archive), + (r'^archive/(?P\d{4})/$', archive), + (r'^archive/(?P\d{4})/(?P\d{1,2})/$', archive), + (r'^djagen/$',main), + + # For development server. + #(r'^(?P.*)$', 'django.views.static.serve', + # {'document_root': settings.BASEPATH + 'gezegen/www/'}), + + +) \ No newline at end of file diff --git a/DJAGEN/tags/djagen_old/djagen/wsgi_handler.py b/DJAGEN/tags/djagen_old/djagen/wsgi_handler.py new file mode 100755 index 0000000..419437f --- /dev/null +++ b/DJAGEN/tags/djagen_old/djagen/wsgi_handler.py @@ -0,0 +1,11 @@ +import sys +import os + +# WSGI handler module. 
+ +sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/..') +os.environ['DJANGO_SETTINGS_MODULE'] = 'djagen.settings' + +import django.core.handlers.wsgi + +application = django.core.handlers.wsgi.WSGIHandler() \ No newline at end of file diff --git a/DJAGEN/trunk/djagen/__init__.py b/DJAGEN/trunk/djagen/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/DJAGEN/trunk/djagen/captcha/__init__.py b/DJAGEN/trunk/djagen/captcha/__init__.py new file mode 100755 index 0000000..ac47d9a --- /dev/null +++ b/DJAGEN/trunk/djagen/captcha/__init__.py @@ -0,0 +1,12 @@ +VERSION = (0, 1, 7) + +def get_version(svn=False): + "Returns the version as a human-format string." + v = '.'.join([str(i) for i in VERSION]) + if svn: + from django.utils.version import get_svn_revision + import os + svn_rev = get_svn_revision(os.path.dirname(__file__)) + if svn_rev: + v = '%s-%s' % (v, svn_rev) + return v diff --git a/DJAGEN/trunk/djagen/captcha/conf/__init__.py b/DJAGEN/trunk/djagen/captcha/conf/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/DJAGEN/trunk/djagen/captcha/conf/settings.py b/DJAGEN/trunk/djagen/captcha/conf/settings.py new file mode 100755 index 0000000..ddfe82f --- /dev/null +++ b/DJAGEN/trunk/djagen/captcha/conf/settings.py @@ -0,0 +1,49 @@ +import os +from django.conf import settings + +CAPTCHA_FONT_PATH = getattr(settings,'CAPTCHA_FONT_PATH', os.path.normpath(os.path.join(os.path.dirname(__file__), '..', 'fonts/Vera.ttf'))) +CAPTCHA_FONT_SIZE = getattr(settings,'CAPTCHA_FONT_SIZE', 22) +CAPTCHA_LETTER_ROTATION = getattr(settings, 'CAPTCHA_LETTER_ROTATION', (-35,35)) +CAPTCHA_BACKGROUND_COLOR = getattr(settings,'CAPTCHA_BACKGROUND_COLOR', '#ffffff') +CAPTCHA_FOREGROUND_COLOR= getattr(settings,'CAPTCHA_FOREGROUND_COLOR', '#001100') +CAPTCHA_CHALLENGE_FUNCT = getattr(settings,'CAPTCHA_CHALLENGE_FUNCT','captcha.helpers.random_char_challenge') +CAPTCHA_NOISE_FUNCTIONS = getattr(settings,'CAPTCHA_NOISE_FUNCTIONS', ('captcha.helpers.noise_arcs','captcha.helpers.noise_dots',)) +CAPTCHA_FILTER_FUNCTIONS = getattr(settings,'CAPTCHA_FILTER_FUNCTIONS',('captcha.helpers.post_smooth',)) +CAPTCHA_WORDS_DICTIONARY = getattr(settings,'CAPTCHA_WORDS_DICTIONARY', '/usr/share/dict/words') +CAPTCHA_FLITE_PATH = getattr(settings,'CAPTCHA_FLITE_PATH',None) +CAPTCHA_TIMEOUT = getattr(settings, 'CAPTCHA_TIMEOUT', 5) # Minutes +CAPTCHA_LENGTH = int(getattr(settings, 'CAPTCHA_LENGTH', 4)) # Chars +CAPTCHA_IMAGE_BEFORE_FIELD = getattr(settings,'CAPTCHA_IMAGE_BEFORE_FIELD', True) +CAPTCHA_DICTIONARY_MIN_LENGTH = getattr(settings,'CAPTCHA_DICTIONARY_MIN_LENGTH', 0) +CAPTCHA_DICTIONARY_MAX_LENGTH = getattr(settings,'CAPTCHA_DICTIONARY_MAX_LENGTH', 99) +if CAPTCHA_IMAGE_BEFORE_FIELD: + CAPTCHA_OUTPUT_FORMAT = getattr(settings,'CAPTCHA_OUTPUT_FORMAT', u'%(image)s %(hidden_field)s %(text_field)s') +else: + CAPTCHA_OUTPUT_FORMAT = getattr(settings,'CAPTCHA_OUTPUT_FORMAT', u'%(hidden_field)s %(text_field)s %(image)s') + + +# Failsafe +if CAPTCHA_DICTIONARY_MIN_LENGTH > CAPTCHA_DICTIONARY_MAX_LENGTH: + CAPTCHA_DICTIONARY_MIN_LENGTH, CAPTCHA_DICTIONARY_MAX_LENGTH = CAPTCHA_DICTIONARY_MAX_LENGTH, CAPTCHA_DICTIONARY_MIN_LENGTH + + +def _callable_from_string(string_or_callable): + if callable(string_or_callable): + return string_or_callable + else: + return getattr(__import__( '.'.join(string_or_callable.split('.')[:-1]), {}, {}, ['']), string_or_callable.split('.')[-1]) + +def get_challenge(): + return _callable_from_string(CAPTCHA_CHALLENGE_FUNCT) + + +def noise_functions(): + if 
CAPTCHA_NOISE_FUNCTIONS: + return map(_callable_from_string, CAPTCHA_NOISE_FUNCTIONS) + return list() + +def filter_functions(): + if CAPTCHA_FILTER_FUNCTIONS: + return map(_callable_from_string, CAPTCHA_FILTER_FUNCTIONS) + return list() + diff --git a/DJAGEN/trunk/djagen/captcha/fields.py b/DJAGEN/trunk/djagen/captcha/fields.py new file mode 100755 index 0000000..7df0f03 --- /dev/null +++ b/DJAGEN/trunk/djagen/captcha/fields.py @@ -0,0 +1,81 @@ +from django.forms.fields import CharField, MultiValueField +from django.forms import ValidationError +from django.forms.widgets import TextInput, MultiWidget, HiddenInput +from django.utils.safestring import mark_safe +from django.utils.translation import ugettext_lazy as _ +from django.core.urlresolvers import reverse +from captcha.models import CaptchaStore +from captcha.conf import settings +from captcha.helpers import * +import datetime + +class CaptchaTextInput(MultiWidget): + def __init__(self,attrs=None): + widgets = ( + HiddenInput(attrs), + TextInput(attrs), + ) + + for key in ('image','hidden_field','text_field'): + if '%%(%s)s'%key not in settings.CAPTCHA_OUTPUT_FORMAT: + raise KeyError('All of %s must be present in your CAPTCHA_OUTPUT_FORMAT setting. Could not find %s' %( + ', '.join(['%%(%s)s'%k for k in ('image','hidden_field','text_field')]), + '%%(%s)s'%key + )) + + super(CaptchaTextInput,self).__init__(widgets,attrs) + + def decompress(self,value): + if value: + return value.split(',') + return [None,None] + + def format_output(self, rendered_widgets): + hidden_field, text_field = rendered_widgets + return settings.CAPTCHA_OUTPUT_FORMAT %dict(image=self.image_and_audio, hidden_field=hidden_field, text_field=text_field) + + def render(self, name, value, attrs=None): + challenge,response= settings.get_challenge()() + + store = CaptchaStore.objects.create(challenge=challenge,response=response) + key = store.hashkey + value = [key, u''] + + self.image_and_audio = 'captcha' %reverse('captcha-image',kwargs=dict(key=key)) + if settings.CAPTCHA_FLITE_PATH: + self.image_and_audio = '%s' %( reverse('captcha-audio', kwargs=dict(key=key)), unicode(_('Play captcha as audio file')), self.image_and_audio) + #fields = super(CaptchaTextInput, self).render(name, value, attrs=attrs) + + return super(CaptchaTextInput, self).render(name, value, attrs=attrs) + +class CaptchaField(MultiValueField): + widget=CaptchaTextInput + + def __init__(self, *args,**kwargs): + fields = ( + CharField(show_hidden_initial=True), + CharField(), + ) + if 'error_messages' not in kwargs or 'invalid' not in kwargs.get('error_messages'): + if 'error_messages' not in kwargs: + kwargs['error_messages'] = dict() + kwargs['error_messages'].update(dict(invalid=_('Invalid CAPTCHA'))) + + + super(CaptchaField,self).__init__(fields=fields, *args, **kwargs) + + def compress(self,data_list): + if data_list: + return ','.join(data_list) + return None + + def clean(self, value): + super(CaptchaField, self).clean(value) + response, value[1] = value[1].strip().lower(), '' + CaptchaStore.remove_expired() + try: + store = CaptchaStore.objects.get(response=response, hashkey=value[0], expiration__gt=datetime.datetime.now()) + store.delete() + except Exception: + raise ValidationError(getattr(self,'error_messages',dict()).get('invalid', _('Invalid CAPTCHA'))) + return value diff --git a/DJAGEN/trunk/djagen/captcha/fonts/COPYRIGHT.TXT b/DJAGEN/trunk/djagen/captcha/fonts/COPYRIGHT.TXT new file mode 100755 index 0000000..e651be1 --- /dev/null +++ b/DJAGEN/trunk/djagen/captcha/fonts/COPYRIGHT.TXT 
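For orientation, the CaptchaField defined above is an ordinary Django MultiValueField, so it drops into any form; its widget renders the hidden hash key, the text input, and the image tag pointing at the captcha-image URL. A minimal usage sketch (the CommentForm and subject names here are illustrative, not part of this commit):

    from django import forms
    from captcha.fields import CaptchaField

    class CommentForm(forms.Form):
        # hypothetical form; CaptchaField adds two sub-fields, the hidden
        # CaptchaStore hashkey and the user's typed response
        subject = forms.CharField(max_length=100)
        captcha = CaptchaField()

On a valid submission, CaptchaField.clean() looks the response up in CaptchaStore and deletes the matching row, so each challenge can be solved only once.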
@@ -0,0 +1,124 @@ +Bitstream Vera Fonts Copyright + +The fonts have a generous copyright, allowing derivative works (as +long as "Bitstream" or "Vera" are not in the names), and full +redistribution (so long as they are not *sold* by themselves). They +can be be bundled, redistributed and sold with any software. + +The fonts are distributed under the following copyright: + +Copyright +========= + +Copyright (c) 2003 by Bitstream, Inc. All Rights Reserved. Bitstream +Vera is a trademark of Bitstream, Inc. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of the fonts accompanying this license ("Fonts") and associated +documentation files (the "Font Software"), to reproduce and distribute +the Font Software, including without limitation the rights to use, +copy, merge, publish, distribute, and/or sell copies of the Font +Software, and to permit persons to whom the Font Software is furnished +to do so, subject to the following conditions: + +The above copyright and trademark notices and this permission notice +shall be included in all copies of one or more of the Font Software +typefaces. + +The Font Software may be modified, altered, or added to, and in +particular the designs of glyphs or characters in the Fonts may be +modified and additional glyphs or characters may be added to the +Fonts, only if the fonts are renamed to names not containing either +the words "Bitstream" or the word "Vera". + +This License becomes null and void to the extent applicable to Fonts +or Font Software that has been modified and is distributed under the +"Bitstream Vera" names. + +The Font Software may be sold as part of a larger software package but +no copy of one or more of the Font Software typefaces may be sold by +itself. + +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL +BITSTREAM OR THE GNOME FOUNDATION BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, +OR CONSEQUENTIAL DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF THE USE OR INABILITY TO USE THE FONT +SOFTWARE OR FROM OTHER DEALINGS IN THE FONT SOFTWARE. + +Except as contained in this notice, the names of Gnome, the Gnome +Foundation, and Bitstream Inc., shall not be used in advertising or +otherwise to promote the sale, use or other dealings in this Font +Software without prior written authorization from the Gnome Foundation +or Bitstream Inc., respectively. For further information, contact: +fonts at gnome dot org. + +Copyright FAQ +============= + + 1. I don't understand the resale restriction... What gives? + + Bitstream is giving away these fonts, but wishes to ensure its + competitors can't just drop the fonts as is into a font sale system + and sell them as is. It seems fair that if Bitstream can't make money + from the Bitstream Vera fonts, their competitors should not be able to + do so either. You can sell the fonts as part of any software package, + however. + + 2. I want to package these fonts separately for distribution and + sale as part of a larger software package or system. Can I do so? + + Yes. A RPM or Debian package is a "larger software package" to begin + with, and you aren't selling them independently by themselves. + See 1. above. + + 3. Are derivative works allowed? + Yes! 
+ + 4. Can I change or add to the font(s)? + Yes, but you must change the name(s) of the font(s). + + 5. Under what terms are derivative works allowed? + + You must change the name(s) of the fonts. This is to ensure the + quality of the fonts, both to protect Bitstream and Gnome. We want to + ensure that if an application has opened a font specifically of these + names, it gets what it expects (though of course, using fontconfig, + substitutions could still could have occurred during font + opening). You must include the Bitstream copyright. Additional + copyrights can be added, as per copyright law. Happy Font Hacking! + + 6. If I have improvements for Bitstream Vera, is it possible they might get + adopted in future versions? + + Yes. The contract between the Gnome Foundation and Bitstream has + provisions for working with Bitstream to ensure quality additions to + the Bitstream Vera font family. Please contact us if you have such + additions. Note, that in general, we will want such additions for the + entire family, not just a single font, and that you'll have to keep + both Gnome and Jim Lyles, Vera's designer, happy! To make sense to add + glyphs to the font, they must be stylistically in keeping with Vera's + design. Vera cannot become a "ransom note" font. Jim Lyles will be + providing a document describing the design elements used in Vera, as a + guide and aid for people interested in contributing to Vera. + + 7. I want to sell a software package that uses these fonts: Can I do so? + + Sure. Bundle the fonts with your software and sell your software + with the fonts. That is the intent of the copyright. + + 8. If applications have built the names "Bitstream Vera" into them, + can I override this somehow to use fonts of my choosing? + + This depends on exact details of the software. Most open source + systems and software (e.g., Gnome, KDE, etc.) are now converting to + use fontconfig (see www.fontconfig.org) to handle font configuration, + selection and substitution; it has provisions for overriding font + names and subsituting alternatives. An example is provided by the + supplied local.conf file, which chooses the family Bitstream Vera for + "sans", "serif" and "monospace". Other software (e.g., the XFree86 + core server) has other mechanisms for font substitution. + diff --git a/DJAGEN/trunk/djagen/captcha/fonts/README.TXT b/DJAGEN/trunk/djagen/captcha/fonts/README.TXT new file mode 100755 index 0000000..0f71795 --- /dev/null +++ b/DJAGEN/trunk/djagen/captcha/fonts/README.TXT @@ -0,0 +1,11 @@ +Contained herin is the Bitstream Vera font family. + +The Copyright information is found in the COPYRIGHT.TXT file (along +with being incoporated into the fonts themselves). + +The releases notes are found in the file "RELEASENOTES.TXT". + +We hope you enjoy Vera! + + Bitstream, Inc. 
+ The Gnome Project diff --git a/DJAGEN/trunk/djagen/captcha/fonts/Vera.ttf b/DJAGEN/trunk/djagen/captcha/fonts/Vera.ttf new file mode 100755 index 0000000..58cd6b5 Binary files /dev/null and b/DJAGEN/trunk/djagen/captcha/fonts/Vera.ttf differ diff --git a/DJAGEN/trunk/djagen/captcha/helpers.py b/DJAGEN/trunk/djagen/captcha/helpers.py new file mode 100755 index 0000000..b400700 --- /dev/null +++ b/DJAGEN/trunk/djagen/captcha/helpers.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +import random +from captcha.conf import settings + +def math_challenge(): + operators = ('+','*','-',) + operands = (random.randint(1,10),random.randint(1,10)) + operator = random.choice(operators) + if operands[0] < operands[1] and '-' == operator: + operands = (operands[1],operands[0]) + challenge = '%d%s%d' %(operands[0],operator,operands[1]) + return u'%s=' %(challenge), unicode(eval(challenge)) + +def random_char_challenge(): + chars,ret = u'abcdefghijklmnopqrstuvwxyz', u'' + for i in range(settings.CAPTCHA_LENGTH): + ret += random.choice(chars) + return ret.upper(),ret + +def unicode_challenge(): + chars,ret = u'äàáëéèïíîöóòüúù', u'' + for i in range(settings.CAPTCHA_LENGTH): + ret += random.choice(chars) + return ret.upper(), ret + +def word_challenge(): + fd = file(settings.CAPTCHA_WORDS_DICTIONARY,'rb') + l = fd.readlines() + fd.close() + while True: + word = random.choice(l).strip() + if len(word) >= settings.CAPTCHA_DICTIONARY_MIN_LENGTH and len(word) <= settings.CAPTCHA_DICTIONARY_MAX_LENGTH: + break + return word.upper(), word.lower() + +def noise_arcs(draw,image): + size = image.size + draw.arc([-20,-20, size[0],20], 0, 295, fill=settings.CAPTCHA_FOREGROUND_COLOR) + draw.line([-20,20, size[0]+20,size[1]-20], fill=settings.CAPTCHA_FOREGROUND_COLOR) + draw.line([-20,0, size[0]+20,size[1]], fill=settings.CAPTCHA_FOREGROUND_COLOR) + return draw + +def noise_dots(draw,image): + size = image.size + for p in range(int(size[0]*size[1]*0.1)): + draw.point((random.randint(0, size[0]),random.randint(0, size[1])), fill=settings.CAPTCHA_FOREGROUND_COLOR ) + return draw + +def post_smooth(image): + import ImageFilter + return image.filter(ImageFilter.SMOOTH) diff --git a/DJAGEN/trunk/djagen/captcha/management/__init__.py b/DJAGEN/trunk/djagen/captcha/management/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/DJAGEN/trunk/djagen/captcha/management/commands/__init__.py b/DJAGEN/trunk/djagen/captcha/management/commands/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/DJAGEN/trunk/djagen/captcha/management/commands/captcha_clean.py b/DJAGEN/trunk/djagen/captcha/management/commands/captcha_clean.py new file mode 100755 index 0000000..9a66e48 --- /dev/null +++ b/DJAGEN/trunk/djagen/captcha/management/commands/captcha_clean.py @@ -0,0 +1,28 @@ +from django.core.management.base import BaseCommand, CommandError +import sys + +from optparse import make_option + +class Command(BaseCommand): + help = "Clean up expired captcha hashkeys." + + def handle(self, **options): + from captcha.models import CaptchaStore + import datetime + verbose = int(options.get('verbosity')) + expired_keys = CaptchaStore.objects.filter(expiration__lte=datetime.datetime.now()).count() + if verbose >= 1: + print "Currently %s expired hashkeys" % expired_keys + try: + CaptchaStore.remove_expired() + except: + if verbose >= 1 : + print "Unable to delete expired hashkeys." + sys.exit(1) + if verbose >= 1: + if expired_keys > 0: + print "Expired hashkeys removed." + else: + print "No keys to remove." 
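The captcha_clean command above is meant to run periodically (for example from cron via manage.py). It can also be invoked programmatically through Django's standard call_command; a small sketch:

    from django.core.management import call_command

    # deletes CaptchaStore rows whose expiration has passed;
    # verbosity=1 makes the command report how many expired keys it found
    call_command('captcha_clean', verbosity=1)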
+ + diff --git a/DJAGEN/trunk/djagen/captcha/models.py b/DJAGEN/trunk/djagen/captcha/models.py new file mode 100755 index 0000000..fc8c599 --- /dev/null +++ b/DJAGEN/trunk/djagen/captcha/models.py @@ -0,0 +1,46 @@ +from django.db import models +from captcha.conf import settings as captcha_settings +import datetime, unicodedata, random, time + +# Heavily based on session key generation in Django +# Use the system (hardware-based) random number generator if it exists. +if hasattr(random, 'SystemRandom'): + randrange = random.SystemRandom().randrange +else: + randrange = random.randrange +MAX_RANDOM_KEY = 18446744073709551616L # 2 << 63 + + +try: + import hashlib # sha for Python 2.5+ +except ImportError: + import sha # sha for Python 2.4 (deprecated in Python 2.6) + hashlib = False + +class CaptchaStore(models.Model): + challenge = models.CharField(blank=False, max_length=32) + response = models.CharField(blank=False, max_length=32) + hashkey = models.CharField(blank=False, max_length=40, unique=True) + expiration = models.DateTimeField(blank=False) + + def save(self,*args,**kwargs): + self.response = self.response.lower() + if not self.expiration: + self.expiration = datetime.datetime.now() + datetime.timedelta(minutes= int(captcha_settings.CAPTCHA_TIMEOUT)) + if not self.hashkey: + key_ = unicodedata.normalize('NFKD', str(randrange(0,MAX_RANDOM_KEY)) + str(time.time()) + unicode(self.challenge)).encode('ascii', 'ignore') + unicodedata.normalize('NFKD', unicode(self.response)).encode('ascii', 'ignore') + if hashlib: + self.hashkey = hashlib.new('sha', key_).hexdigest() + else: + self.hashkey = sha.new(key_).hexdigest() + del(key_) + super(CaptchaStore,self).save(*args,**kwargs) + + def __unicode__(self): + return self.challenge + + + def remove_expired(cls): + cls.objects.filter(expiration__lte=datetime.datetime.now()).delete() + remove_expired = classmethod(remove_expired) + diff --git a/DJAGEN/trunk/djagen/captcha/tests/__init__.py b/DJAGEN/trunk/djagen/captcha/tests/__init__.py new file mode 100755 index 0000000..ded5948 --- /dev/null +++ b/DJAGEN/trunk/djagen/captcha/tests/__init__.py @@ -0,0 +1,153 @@ +# -*- coding: utf-8 -*- +from captcha.conf import settings +from captcha.models import CaptchaStore +from django.core.urlresolvers import reverse +from django.test import TestCase +from django.utils.translation import ugettext_lazy as _ +import datetime + + +class CaptchaCase(TestCase): + urls = 'captcha.tests.urls' + + def setUp(self): + self.default_challenge = settings.get_challenge()() + self.math_challenge = settings._callable_from_string('captcha.helpers.math_challenge')() + self.chars_challenge = settings._callable_from_string('captcha.helpers.random_char_challenge')() + self.unicode_challenge = settings._callable_from_string('captcha.helpers.unicode_challenge')() + + self.default_store, created = CaptchaStore.objects.get_or_create(challenge=self.default_challenge[0],response=self.default_challenge[1]) + self.math_store, created = CaptchaStore.objects.get_or_create(challenge=self.math_challenge[0],response=self.math_challenge[1]) + self.chars_store, created = CaptchaStore.objects.get_or_create(challenge=self.chars_challenge[0],response=self.chars_challenge[1]) + self.unicode_store, created = CaptchaStore.objects.get_or_create(challenge=self.unicode_challenge[0],response=self.unicode_challenge[1]) + + + + + def testImages(self): + for key in (self.math_store.hashkey, self.chars_store.hashkey, self.default_store.hashkey, self.unicode_store.hashkey): + response = 
self.client.get(reverse('captcha-image',kwargs=dict(key=key))) + self.failUnlessEqual(response.status_code, 200) + self.assertTrue(response.has_header('content-type')) + self.assertEquals(response._headers.get('content-type'), ('Content-Type', 'image/png')) + + def testAudio(self): + if not settings.CAPTCHA_FLITE_PATH: + return + for key in (self.math_store.hashkey, self.chars_store.hashkey, self.default_store.hashkey, self.unicode_store.hashkey): + response = self.client.get(reverse('captcha-audio',kwargs=dict(key=key))) + self.failUnlessEqual(response.status_code, 200) + self.assertTrue(len(response.content) > 1024) + self.assertTrue(response.has_header('content-type')) + self.assertEquals(response._headers.get('content-type'), ('Content-Type', 'audio/x-wav')) + + def testFormSubmit(self): + r = self.client.get(reverse('captcha-test')) + self.failUnlessEqual(r.status_code, 200) + hash_ = r.content[r.content.find('value="')+7:r.content.find('value="')+47] + try: + response = CaptchaStore.objects.get(hashkey=hash_).response + except: + self.fail() + + r = self.client.post(reverse('captcha-test'), dict(captcha_0=hash_,captcha_1=response, subject='xxx', sender='asasd@asdasd.com')) + self.failUnlessEqual(r.status_code, 200) + self.assertTrue(r.content.find('Form validated') > 0) + + r = self.client.post(reverse('captcha-test'), dict(captcha_0=hash_,captcha_1=response, subject='xxx', sender='asasd@asdasd.com')) + self.failUnlessEqual(r.status_code, 200) + self.assertFalse(r.content.find('Form validated') > 0) + + + + def testWrongSubmit(self): + r = self.client.get(reverse('captcha-test')) + self.failUnlessEqual(r.status_code, 200) + r = self.client.post(reverse('captcha-test'), dict(captcha_0='abc',captcha_1='wrong response', subject='xxx', sender='asasd@asdasd.com')) + self.assertFormError(r,'form','captcha',_('Invalid CAPTCHA')) + + def testDeleteExpired(self): + self.default_store.expiration = datetime.datetime.now() - datetime.timedelta(minutes=5) + self.default_store.save() + hash_ = self.default_store.hashkey + r = self.client.post(reverse('captcha-test'), dict(captcha_0=hash_,captcha_1=self.default_store.response, subject='xxx', sender='asasd@asdasd.com')) + + self.failUnlessEqual(r.status_code, 200) + self.assertFalse(r.content.find('Form validated') > 0) + + # expired -> deleted + try: + CaptchaStore.objects.get(hashkey=hash_) + self.fail() + except: + pass + + def testCustomErrorMessage(self): + r = self.client.get(reverse('captcha-test-custom-error-message')) + self.failUnlessEqual(r.status_code, 200) + + # Wrong answer + r = self.client.post(reverse('captcha-test-custom-error-message'), dict(captcha_0='abc',captcha_1='wrong response')) + self.assertFormError(r,'form','captcha','TEST CUSTOM ERROR MESSAGE') + # empty answer + r = self.client.post(reverse('captcha-test-custom-error-message'), dict(captcha_0='abc',captcha_1='')) + self.assertFormError(r,'form','captcha',_('This field is required.')) + + def testRepeatedChallenge(self): + store = CaptchaStore.objects.create(challenge='xxx',response='xxx') + try: + store2 = CaptchaStore.objects.create(challenge='xxx',response='xxx') + except Exception: + self.fail() + + + def testRepeatedChallengeFormSubmit(self): + settings.CAPTCHA_CHALLENGE_FUNCT = 'captcha.tests.trivial_challenge' + + r1 = self.client.get(reverse('captcha-test')) + r2 = self.client.get(reverse('captcha-test')) + self.failUnlessEqual(r1.status_code, 200) + self.failUnlessEqual(r2.status_code, 200) + hash_1 = 
r1.content[r1.content.find('value="')+7:r1.content.find('value="')+47]
+        hash_2 = r2.content[r2.content.find('value="')+7:r2.content.find('value="')+47]
+        try:
+            store_1 = CaptchaStore.objects.get(hashkey=hash_1)
+            store_2 = CaptchaStore.objects.get(hashkey=hash_2)
+        except:
+            self.fail()
+
+        self.assertTrue(store_1.pk != store_2.pk)
+        self.assertTrue(store_1.response == store_2.response)
+        self.assertTrue(hash_1 != hash_2)
+
+        r1 = self.client.post(reverse('captcha-test'), dict(captcha_0=hash_1,captcha_1=store_1.response, subject='xxx', sender='asasd@asdasd.com'))
+        self.failUnlessEqual(r1.status_code, 200)
+        self.assertTrue(r1.content.find('Form validated') > 0)
+
+        try:
+            store_2 = CaptchaStore.objects.get(hashkey=hash_2)
+        except:
+            self.fail()
+
+        r2 = self.client.post(reverse('captcha-test'), dict(captcha_0=hash_2,captcha_1=store_2.response, subject='xxx', sender='asasd@asdasd.com'))
+        self.failUnlessEqual(r2.status_code, 200)
+        self.assertTrue(r2.content.find('Form validated') > 0)
+
+    def testOutputFormat(self):
+        settings.CAPTCHA_OUTPUT_FORMAT = u'%(image)s<p>Hello, captcha world</p>%(hidden_field)s%(text_field)s'
+        r = self.client.get(reverse('captcha-test'))
+        self.failUnlessEqual(r.status_code, 200)
+        self.assertTrue('<p>Hello, captcha world</p>' in r.content)
+
+    def testInvalidOutputFormat(self):
+        settings.CAPTCHA_OUTPUT_FORMAT = u'%(image)s'
+        try:
+            r = self.client.get(reverse('captcha-test'))
+            self.fail()
+        except KeyError:
+            pass
+
+def trivial_challenge():
+    return 'trivial','trivial'
diff --git a/DJAGEN/trunk/djagen/captcha/tests/urls.py b/DJAGEN/trunk/djagen/captcha/tests/urls.py new file mode 100755 index 0000000..78b6ee3 --- /dev/null +++ b/DJAGEN/trunk/djagen/captcha/tests/urls.py @@ -0,0 +1,6 @@
+from django.conf.urls.defaults import *
+urlpatterns = patterns('',
+    url(r'test/$','captcha.tests.views.test',name='captcha-test'),
+    url(r'test2/$','captcha.tests.views.test_custom_error_message',name='captcha-test-custom-error-message'),
+    url(r'',include('captcha.urls')),
+)
diff --git a/DJAGEN/trunk/djagen/captcha/tests/views.py b/DJAGEN/trunk/djagen/captcha/tests/views.py new file mode 100755 index 0000000..8b836c1 --- /dev/null +++ b/DJAGEN/trunk/djagen/captcha/tests/views.py @@ -0,0 +1,58 @@
+from django import forms
+from captcha.fields import CaptchaField
+from django.template import Context, RequestContext, loader
+from django.http import HttpResponse
+
+
+TEST_TEMPLATE = r'''
+<html>
+<head>
+    <title>captcha test</title>
+</head>
+<body>
+    {% if passed %}
+    <p style="color: green;">Form validated</p>
+    {% endif %}
+    <form action="" method="post">
+        {{form.as_p}}
+        <p><input type="submit" value="Submit"></p>
+    </form>
+</body>
+</html>
    + + +''' + +def test(request): + + class CaptchaTestForm(forms.Form): + subject = forms.CharField(max_length=100) + sender = forms.EmailField() + captcha = CaptchaField(help_text='asdasd') + + if request.POST: + form = CaptchaTestForm(request.POST) + if form.is_valid(): + passed = True + else: + form = CaptchaTestForm() + + t = loader.get_template_from_string(TEST_TEMPLATE) + return HttpResponse(t.render(RequestContext(request, locals()))) + + +def test_custom_error_message(request): + + class CaptchaTestForm(forms.Form): + captcha = CaptchaField(help_text='asdasd', error_messages=dict(invalid='TEST CUSTOM ERROR MESSAGE')) + + if request.POST: + form = CaptchaTestForm(request.POST) + if form.is_valid(): + passed = True + else: + form = CaptchaTestForm() + + t = loader.get_template_from_string(TEST_TEMPLATE) + return HttpResponse(t.render(RequestContext(request, locals()))) diff --git a/DJAGEN/trunk/djagen/captcha/urls.py b/DJAGEN/trunk/djagen/captcha/urls.py new file mode 100755 index 0000000..c458668 --- /dev/null +++ b/DJAGEN/trunk/djagen/captcha/urls.py @@ -0,0 +1,6 @@ +from django.conf.urls.defaults import * + +urlpatterns = patterns('captcha.views', + url(r'image/(?P\w+)/$','captcha_image',name='captcha-image'), + url(r'audio/(?P\w+)/$','captcha_audio',name='captcha-audio'), +) diff --git a/DJAGEN/trunk/djagen/captcha/views.py b/DJAGEN/trunk/djagen/captcha/views.py new file mode 100755 index 0000000..fec51f7 --- /dev/null +++ b/DJAGEN/trunk/djagen/captcha/views.py @@ -0,0 +1,92 @@ +from cStringIO import StringIO +from captcha.models import CaptchaStore +from django.http import HttpResponse, Http404 +from django.shortcuts import get_object_or_404 +import Image,ImageDraw,ImageFont,ImageFilter,random +from captcha.conf import settings +import re + +NON_DIGITS_RX = re.compile('[^\d]') + +def captcha_image(request,key): + store = get_object_or_404(CaptchaStore,hashkey=key) + text=store.challenge + + if settings.CAPTCHA_FONT_PATH.lower().strip().endswith('ttf'): + font = ImageFont.truetype(settings.CAPTCHA_FONT_PATH,settings.CAPTCHA_FONT_SIZE) + else: + font = ImageFont.load(settings.CAPTCHA_FONT_PATH) + + size = font.getsize(text) + size = (size[0]*2,size[1]) + image = Image.new('RGB', size , settings.CAPTCHA_BACKGROUND_COLOR) + + try: + PIL_VERSION = int(NON_DIGITS_RX.sub('',Image.VERSION)) + except: + PIL_VERSION = 116 + + + + xpos = 2 + for char in text: + fgimage = Image.new('RGB', size, settings.CAPTCHA_FOREGROUND_COLOR) + charimage = Image.new('L', font.getsize(' %s '%char), '#000000') + chardraw = ImageDraw.Draw(charimage) + chardraw.text((0,0), ' %s '%char, font=font, fill='#ffffff') + if settings.CAPTCHA_LETTER_ROTATION: + if PIL_VERSION >= 116: + charimage = charimage.rotate(random.randrange( *settings.CAPTCHA_LETTER_ROTATION ), expand=0, resample=Image.BICUBIC) + else: + charimage = charimage.rotate(random.randrange( *settings.CAPTCHA_LETTER_ROTATION ), resample=Image.BICUBIC) + charimage = charimage.crop(charimage.getbbox()) + maskimage = Image.new('L', size) + + maskimage.paste(charimage, (xpos, 4, xpos+charimage.size[0], 4+charimage.size[1] )) + size = maskimage.size + image = Image.composite(fgimage, image, maskimage) + xpos = xpos + 2 + charimage.size[0] + + image = image.crop((0,0,xpos+1,size[1])) + draw = ImageDraw.Draw(image) + + for f in settings.noise_functions(): + draw = f(draw,image) + for f in settings.filter_functions(): + image = f(image) + + out = StringIO() + image.save(out,"PNG") + out.seek(0) + + response = HttpResponse() + response['Content-Type'] = 
'image/png'
+    response.write(out.read())
+
+    return response
+
+def captcha_audio(request,key):
+    if settings.CAPTCHA_FLITE_PATH:
+        store = get_object_or_404(CaptchaStore,hashkey=key)
+        text=store.challenge
+        if 'captcha.helpers.math_challenge' == settings.CAPTCHA_CHALLENGE_FUNCT:
+            text = text.replace('*','times').replace('-','minus')
+        else:
+            text = ', '.join(list(text))
+
+        import tempfile, os
+
+        path = str(os.path.join(tempfile.gettempdir(),'%s.wav' %key))
+        cline = '%s -t "%s" -o "%s"' %(settings.CAPTCHA_FLITE_PATH, text, path)
+
+        os.popen(cline).read()
+        if os.path.isfile(path):
+            response = HttpResponse()
+            f = open(path,'rb')
+            response['Content-Type'] = 'audio/x-wav'
+            response.write(f.read())
+            f.close()
+            os.unlink(path)
+            return response
+
+    raise Http404
diff --git a/DJAGEN/trunk/djagen/collector/__init__.py b/DJAGEN/trunk/djagen/collector/__init__.py new file mode 100755 index 0000000..e69de29
diff --git a/DJAGEN/trunk/djagen/collector/admin.py b/DJAGEN/trunk/djagen/collector/admin.py new file mode 100755 index 0000000..f6c9e20 --- /dev/null +++ b/DJAGEN/trunk/djagen/collector/admin.py @@ -0,0 +1,74 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+from django.contrib import admin
+from djagen.collector.models import *
+
+from django.conf import settings
+
+import os
+import datetime
+import shutil
+
+from djagen.collector.configini import *
+
+class AuthorsAdmin (admin.ModelAdmin):
+
+    list_display = ('author_id', 'author_name', 'author_email', 'author_face', 'current_status', 'is_approved', 'label_personal', 'label_lkd', 'label_community', 'label_eng')
+    list_select_related = True
+
+    search_fields = ['author_name', 'author_surname', 'author_email']
+
+    def save_model(self, request, obj, form, change):
+
+        #get the values for saving
+        author_name = obj.author_name
+        author_surname = obj.author_surname
+        author_face = obj.author_face
+        channel_url = obj.channel_url
+
+        current_status = obj.current_status
+        is_approved = obj.is_approved
+
+        #creating the history
+        now = datetime.datetime.now()
+        action_type = current_status
+
+        author_id = obj.author_id
+        if author_id:
+            #then this is an update
+            author = Authors.objects.get(author_id = author_id)
+            pre_status = author.is_approved
+            current_status = obj.is_approved
+            obj.save()
+        else:
+            obj.save()
+            author = Authors.objects.get(author_name=author_name, author_surname=author_surname, channel_url=channel_url)
+            pre_status = None
+            current_status = author.is_approved
+
+        author.history_set.create(action_type=action_type, action_date=now, action_owner=request.user.username)
+
+        #create tmp_config.ini here
+        handler = Handler(author.author_id)
+        handler.create_tmp_entries()
+
+        if pre_status != current_status:
+            a_face = author.author_face
+
+            images_path = os.path.join(settings.MAIN_PATH, 'www', 'images')
+            heads_path = os.path.join(images_path, 'heads')
+            face_path = os.path.join(heads_path, a_face)
+
+            tmp_image_path = os.path.join(settings.MAIN_PATH, 'temp_ini', a_face)
+
+            if os.path.exists(tmp_image_path):
+                shutil.move(tmp_image_path, face_path)
+
+class HistoryAdmin(admin.ModelAdmin):
+    list_display = ('action_type', 'action_date', 'action_author', 'action_owner')
+
+admin.site.register(History, HistoryAdmin)
+admin.site.register(Authors, AuthorsAdmin)
+
diff --git a/DJAGEN/trunk/djagen/collector/configini.py b/DJAGEN/trunk/djagen/collector/configini.py new file mode 100755 index 0000000..af4f7ee --- /dev/null +++ b/DJAGEN/trunk/djagen/collector/configini.py @@ -0,0 +1,93 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
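+# --- Editor sketch (not part of the original diff) ---------------------------
+# The Handler class below rewrites tmp_entries.ini from config_entries.ini.
+# This is the same ConfigParser read-and-dump cycle in miniature; the path
+# argument is illustrative, not a project path.
+import ConfigParser as _ConfigParserSketch
+
+def _dump_entries_sketch(path):
+    config = _ConfigParserSketch.ConfigParser()
+    config.read(path)
+    for section in config.sections():
+        print '[%s]' % section                        # section header, e.g. the feed URL
+        for option in config.options(section):
+            print '%s = %s' % (option, config.get(section, option))
+# ------------------------------------------------------------------------------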
+
+import os
+from django.conf import settings
+from djagen.collector.models import *
+import ConfigParser
+
+class Handler:
+
+    def __init__(self, id):
+
+        self.id = id
+
+        self.tmp_entries_ini = os.path.join(settings.MAIN_PATH, 'tmp_ini', 'tmp_entries.ini')
+
+        self.config_entries_ini = os.path.join(settings.MAIN_PATH, 'gezegen', 'config_entries.ini')
+
+    def __set_values(self):
+
+        author = Authors.objects.get(author_id = self.id)
+
+        if not author.is_approved:
+            return False
+
+        self.name = author.author_name + ' ' + author.author_surname
+        self.face = author.author_face
+        self.url = author.channel_url
+
+        labels = {'Personal': author.label_personal, 'LKD': author.label_lkd, 'Community': author.label_community, 'Eng': author.label_eng}
+
+        label_li = [name for name, flag in labels.iteritems() if flag]
+        self.author_labels = " ".join(label_li)
+
+        return True
+
+    def create_tmp_entries(self):
+
+        if not self.__set_values(): return
+
+        tmp_entries = open(self.tmp_entries_ini, 'w')
+
+        Config = ConfigParser.ConfigParser()
+        Config.read(self.config_entries_ini)
+        sections = Config.sections()
+
+        for section in sections:
+
+            config_name = Config.get(section, 'name')
+            config_label = Config.get(section, 'label')
+            config_id = Config.get(section, 'id')
+            config_url = section
+
+            try:
+                config_face = Config.get(section, 'face')
+            except:
+                config_face = None
+
+            if str(config_id) == str(self.id):
+
+                url = self.url
+                face = self.face
+                name = self.name
+                label = self.author_labels
+                id = self.id
+
+            else:
+
+                url = config_url
+                face = config_face
+                name = config_name
+                label = config_label
+                id = config_id
+
+            s = '[' + url + ']' + '\n'
+            s += 'name = ' + name + '\n'
+            s += 'label = ' + label + '\n'
+            if face:
+                s += 'face = ' + face + '\n'
+            s += 'id = ' + str(id) + '\n' + '\n'
+
+            tmp_entries.write(s)
+
+        tmp_entries.close()
diff --git a/DJAGEN/trunk/djagen/collector/configxml.py b/DJAGEN/trunk/djagen/collector/configxml.py new file mode 100755 index 0000000..e952792 --- /dev/null +++ b/DJAGEN/trunk/djagen/collector/configxml.py @@ -0,0 +1,121 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+import os
+from xml.dom import minidom
+
+class Handler:
+
+    def __init__(self):
+
+        self.main_url = "/home/oguz/django-projects/djagen/gezegen"
+        self.gezegen_url = os.path.join(self.main_url,"gezegen")
+        self.entries_xml = os.path.join(self.gezegen_url, "config_entries.xml")
+        self.header_xml = os.path.join(self.gezegen_url, 'config_header.xml')
+        self.tmp_ini_dir_path = os.path.join(self.main_url, "tmp_ini")
+
+    def get_doc(self, type="entries"):
+
+        if type == "entries":
+            self.doc = minidom.parse(self.entries_xml)
+        else:
+            self.doc = minidom.parse(self.header_xml)
+        return self.doc
+
+    def get_tag_entries(self,tag):
+
+        self.entries = self.doc.getElementsByTagName(tag)
+        return self.entries
+
+    def set_ini_variables(self, id, name, feed, nick, face, label):
+
+        self.tmp_ini = {'id': id, 'name': name, 'feed': feed, 'nick': nick, 'face': face, 'label': label}
+
+    def open_file(self):
+        path = os.path.join(self.tmp_ini_dir_path, 'tmp.ini')
+        self.f = open(path, "w")
+
+    def create_header(self):
+
+        for header in self.entries:
+
+            children = header.childNodes
+            for child in children:
+                if child.nodeType == child.TEXT_NODE: continue
+                else:
+                    node_name = child.nodeName
+                    f_child = child.firstChild
+                    node_value = f_child.nodeValue
+
+                    s = []
+                    if node_name != "header_name":
+                        s.append(node_name)
+                        s.append("=")
+                        s.append(node_value)
+                        s.append("\n")
+                    ss = " ".join(s)
+                    self.f.write(ss)
+
+    def traverse(self):
+
+        for entry in self.entries:
+
+            nodes = entry.childNodes
+
+            #initialise face once per entry, not per node, so a parsed face is not reset
+            self.face = None
+
+            for node in nodes:
+
+                child = node.firstChild
+
+                if node.nodeType == node.TEXT_NODE: continue
+
+                if node.nodeName == "feed":
+                    self.feed = child.toxml()
+
+                if node.nodeName == "name":
+                    self.name = child.toxml()
+
+                if node.nodeName == "nick":
+                    self.nick = child.toxml()
+
+                if node.nodeName == "label":
+                    self.label = child.toxml()
+
+                if node.nodeName == "face":
+                    self.face = child.toxml()
+
+                if node.nodeName == "id":
+                    self.id = child.toxml()
+
+            if int(self.tmp_ini['id']) == int(self.id):
+
+                self.write_to_file(self.tmp_ini)
+
+            else:
+
+                config = {'id': self.id, 'name': self.name, 'feed': self.feed, 'nick': self.nick, 'label': self.label, 'face': self.face}
+                self.write_to_file(config)
+
+    def write_to_file(self, dic):
+
+        feed = "feed = " + dic['feed'] + "\n"
+        name = "name = " + dic['name'] + "\n"
+        nick = "nick = " + dic['nick'] + "\n"
+        label = "label = " + dic['label'] + "\n"
+        id = "id = " + dic['id'] + "\n"
+
+        self.f.write("\n")
+        self.f.write(feed)
+        self.f.write(name)
+        self.f.write(nick)
+        if dic['face']:
+            face = "face = " + dic['face'] + "\n"
+            self.f.write(face)
+        self.f.write(label)
+        self.f.write(id)
+
+    def close_file(self):
+        self.f.close()
+
diff --git a/DJAGEN/trunk/djagen/collector/forms.py b/DJAGEN/trunk/djagen/collector/forms.py new file mode 100755 index 0000000..e15bf4e --- /dev/null +++ b/DJAGEN/trunk/djagen/collector/forms.py @@ -0,0 +1,21 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+from django import forms
+from captcha.fields import CaptchaField
+
+class ContactForm(forms.Form):
+
+    name = forms.CharField(max_length=25, required=True, error_messages={'required': 'Lütfen adınızı giriniz'}, label='Adınız')
+    surname = forms.CharField(max_length=25, required=True, error_messages={'required': 'Lütfen soyadınızı giriniz'}, label='Soyadınız')
+    email = forms.EmailField(required=True, error_messages={'required': 'Size ulaşabileceğimiz eposta adresinizi giriniz'}, label='Eposta Adresiniz')
+    hackergotchi = forms.FileField(required=False, label='Hackergotchiniz', help_text='Max 80*80 pixellik Gezegende görünmesini istediğiniz fotoğrafınız')
+    feed = forms.URLField(required=True, label='Besleme adresiniz', help_text='Günlüğünüzün XML kaynağının adresi')
+    message = forms.CharField(required=False, label='İletişim Mesajınız', widget=forms.widgets.Textarea())
+    #field for captcha
+    captcha = CaptchaField(label="Captcha Alanı", help_text='Gördüğünüz karakterleri aynen yazınız', error_messages={'required': 'Hatalı yazdınız!'})
+
+class QueryForm(forms.Form):
+    name = forms.CharField(max_length=25, required = False, label = 'Adı')
+    surname = forms.CharField(max_length=25, required = False, label = 'Soyadı')
+    text = forms.CharField(required = False, label = 'Aradığınız metin', widget = forms.widgets.Textarea() )
diff --git a/DJAGEN/trunk/djagen/collector/models.py b/DJAGEN/trunk/djagen/collector/models.py new file mode 100755 index 0000000..eee5269 --- /dev/null +++ b/DJAGEN/trunk/djagen/collector/models.py @@ -0,0 +1,111 @@
+from django.db import models
+import datetime, unicodedata, random, time
+import re
+
+# Create your models here.
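+# --- Editor sketch (not part of the original diff) ---------------------------
+# Entries.sanitize below strips markup with a bare regex; shown standalone for
+# clarity. Note a regex strip is lossy and is not a real HTML sanitizer.
+import re as _re_sketch
+
+_TAG_RX = _re_sketch.compile(r'<[^<]*?/?>')
+
+def _strip_tags_sketch(data):
+    # '<p>Hello <b>world</b></p>' -> 'Hello world'
+    return _TAG_RX.sub('', data)
+# ------------------------------------------------------------------------------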
+ACTION_CHOICES = (
+    (1, u'Removed'),
+    (2, u'Approved'),
+    (3, u'Paused'),
+    (4, u'Readded'),
+    (5, u'Applied'),
+    (6, u'Edited')
+    )
+
+class Authors (models.Model):
+    author_id = models.AutoField(primary_key=True, help_text="Author ID")
+    author_name = models.CharField(max_length=50, help_text="Author Name")
+    author_surname = models.CharField(max_length=50, help_text="Author Surname")
+    #we don't keep emails in the config.ini files; this field is entered on the admin page
+    author_email = models.EmailField(null=True, blank=True, help_text="Author Email Address")
+    #the png file name of the author
+    author_face = models.CharField(max_length=30, null=True, blank=True, help_text="Author Face Name")
+    channel_subtitle = models.TextField(null=True, blank=True, help_text="Channel Subtitle")
+    channel_title = models.TextField(null=True, blank=True, help_text="Channel Title")
+    #URL of the feed.
+    channel_url = models.URLField(help_text="Channel URL")
+    #Link to the original format feed
+    channel_link = models.URLField(null=True, blank=True, help_text="Channel Link")
+    channel_urlstatus = models.IntegerField(null=True, blank=True, help_text="Channel URL Status")
+
+    #use this field to check whether the author is shown on the planet or not, e.g. banned situations
+    current_status = models.SmallIntegerField(default=2, choices=ACTION_CHOICES, help_text="Current Status of the Author")
+    #whether the application to the planet is approved; approved ones are shown on the planet
+    is_approved = models.BooleanField(default=1, help_text="Approve Status of the Author")
+
+    #planets that the channel belongs to
+    #in config.ini the entries should be one of the below:
+    #label = Personal
+    #label = LKD
+    #label = Eng
+    #label = Community
+    label_personal = models.BooleanField(default=1, help_text="Channels at the Personal Blog Page")
+    label_lkd = models.BooleanField(default=0, help_text="Channels that belong to LKD Blogs")
+    label_community = models.BooleanField(default=0, help_text="Channels that belong to some community blogs")
+    label_eng = models.BooleanField(default=0, help_text="Channels that have English entries")
+    #at the main page, just show personal and lkd for now; for communities ask them for a special rss
+
+    def __unicode__(self):
+        return u'%s %s' % (self.author_name, self.author_surname)
+
+    class Meta:
+        #order according to the author_name, ascending
+        ordering = ['author_name']
+
+# keep the history of the actions that are taken on the member urls
+class History (models.Model):
+    action_type = models.SmallIntegerField(choices=ACTION_CHOICES)
+    action_date = models.DateTimeField()
+    action_explanation = models.TextField(help_text="Reason of Action", blank=True, null=True)
+    action_author = models.ForeignKey('Authors')
+    action_owner = models.CharField(max_length=20, help_text="The user who did the action")
+
+    def __unicode__(self):
+        return unicode(self.action_type)
+
+    class Meta:
+        #order descending, show the last actions at top
+        ordering = ['-action_date']
+
+class Entries (models.Model):
+    id_hash = models.CharField(max_length=50, help_text="Hash of the ID", primary_key=True)
+    title = models.CharField(max_length=150, help_text="Entry Title")
+    content_html = models.TextField(help_text="Entry Original Content")
+    content_text = models.TextField(help_text="Entry Pure Text Content")
+    summary = models.TextField(help_text="Entry Summary", null=True, blank=True)
+    link = models.URLField(help_text="Link to Entry")
+    date = models.DateTimeField(help_text="Date of the entry")
+    entry_id = models.ForeignKey('Authors')
+
+    def __unicode__(self):
+        return self.title
+
+    class Meta:
+        ordering = ['-date']
+
+    def sanitize(self, data):
+        p = re.compile(r'<[^<]*?/?>')
+        return p.sub('', data)
+
+class RunTime (models.Model):
+    run_time = models.DateTimeField(help_text="Run time of the planet script", auto_now=True)
+
+    def __unicode__(self):
+        return unicode(self.run_time)
+
+    class Meta:
+        ordering = ['-run_time']
+
+    def get_run_time(self):
+
+        dt = ".".join(map(lambda x: str(x), [self.run_time.day, self.run_time.month, self.run_time.year]))
+        hm = ":".join(map(lambda x: str(x), [self.run_time.hour, self.run_time.minute]))
+
+        rslt = " ".join([dt, hm])
+        return rslt
+
diff --git a/DJAGEN/trunk/djagen/collector/views.py b/DJAGEN/trunk/djagen/collector/views.py new file mode 100755 index 0000000..22f637f --- /dev/null +++ b/DJAGEN/trunk/djagen/collector/views.py @@ -0,0 +1,227 @@
+# -*- coding: utf-8 -*-
+
+# View definitions are created here.
+from django.shortcuts import render_to_response
+from django.http import HttpResponse,HttpResponseRedirect
+from djagen.collector.models import *
+from djagen.collector.forms import ContactForm, QueryForm
+from djagen.collector.wrappers import render_response
+from django.conf import settings
+import magic
+import os
+import datetime, time
+from django.core.paginator import Paginator, EmptyPage, InvalidPage
+
+import string
+
+BASE_URL = settings.BASE_URL
+
+def main(request):
+    selected_entries = Entries.objects.select_related()
+    entries_list1 = selected_entries.filter(entry_id__label_personal = 1)
+    entries_list2 = selected_entries.filter(entry_id__label_lkd = 1)
+    entries_list3 = selected_entries.filter(entry_id__label_community = 1)
+    entries_list = entries_list1 | entries_list2 | entries_list3
+
+    # Entries longer than truncate_words words get truncated in the template.
+    truncate_words = 250
+    items_per_page = 25
+
+    #get the last run time
+    run_time = RunTime.objects.all()[0]
+
+    #get the last entries' date
+    last_entry_date = Entries.objects.all()[0].date
+    day = datetime.timedelta(days=1)
+    last_date_li = []
+    for x in xrange(6):
+        last_entry_date -= day
+        last_date_li.append(last_entry_date)
+
+    return render_to_response('main/main.html' ,{
+        'entries_list':entries_list,
+        'truncate_words':truncate_words,
+        'items_per_page':repr(items_per_page),
+        'run_time':run_time,
+        #'pag_entries_list':pag_entries_list,
+        'BASE_URL': BASE_URL,
+        'last_date_li': last_date_li,
+        })
+
+def member_subscribe(request):
+    if request.method == 'POST':
+        form = ContactForm(request.POST, request.FILES)
+        #return HttpResponse(str(request.FILES))
+        if form.is_valid():
+            human = True
+            try:
+                check = handle_uploaded_file(request.FILES['hackergotchi'])
+            except:
+                check = (False,False)
+            #save the author information
+
+            f = request.FILES['hackergotchi']
+
+            if check[0]:
+                #change the name of the file to the unique name created
+                f.name = check[1]
+
+                author = Authors(author_name=request.POST['name'], author_surname=request.POST['surname'], author_email=request.POST['email'], channel_url=request.POST['feed'], author_face=f.name, is_approved=0, current_status=5)
+            else:
+                author = Authors(author_name=request.POST['name'], author_surname=request.POST['surname'], author_email=request.POST['email'], channel_url=request.POST['feed'], is_approved=0, current_status=5)
+            try:
+                author.save()
+
+                #save the history with explanation
+                author.history_set.create(action_type=5, action_date=datetime.datetime.now(), action_explanation=request.POST['message'])
+            except:
+                pass
+            #send mail part
+            #fill it here
+            return render_response(request, 'main/subscribe.html',{'submit': 'done', 'BASE_URL': BASE_URL})
+    else:
+        form = ContactForm()
+    return render_response(request, 'main/subscribe.html', {'form': form, 'BASE_URL': BASE_URL})
+
+def handle_uploaded_file(f):
+
+    if not f.name: return (False, '')
+    #create a unique name for the image: <timestamp><fraction>.<original extension>
+    t = str(time.time()).split(".")
+    img_name = t[0] + t[1] + '.' + f.name.split(".")[-1]
+    f.name = img_name
+    path = os.path.join(settings.FILE_UPLOAD_TEMP_DIR, f.name)
+
+    destination = open(path, 'wb+')
+    for chunk in f.chunks():
+        destination.write(chunk)
+    destination.close()
+
+    m = magic.open(magic.MAGIC_MIME)
+    m.load()
+    t = m.file(path)
+    if t.split('/')[0] == 'image':
+        return (True, f.name)
+    else:
+        os.unlink(path)
+        return (False, '')
+
+def list_members(request):
+
+    authors = Authors.objects.all()
+
+    return render_response(request, 'main/members.html', {'members': authors, 'BASE_URL': BASE_URL})
+
+def query(request):
+
+    return render_response(request,'main/query.html',{'BASE_URL' : BASE_URL})
+
+def archive(request,archive_year='',archive_month=''):
+
+    # Entries longer than truncate_words words get truncated in the template.
+    truncate_words = 250
+    items_per_page = 25
+
+    #get the last run time
+    run_time = RunTime.objects.all()[0]
+
+    ### Determine if the request includes any query or not. ###
+    if (request.GET):
+        # Switch to 'return the result of query' mode.
+
+        #Querying
+        #TODO: We should improve the querying method here; see the sketch below.
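+        # --- Editor sketch (not part of the original diff) -------------------
+        # One way to do the querying below without try/except unions is to
+        # accumulate the optional terms into a single Q filter. Field names are
+        # the ones from models.py; the snippet itself is hypothetical and left
+        # commented out so the existing control flow is untouched.
+        # from django.db.models import Q
+        # q = Q()
+        # if request.GET.get('q_author_name'):
+        #     q &= Q(entry_id__author_name__icontains=request.GET['q_author_name'])
+        # if request.GET.get('q_author_surname'):
+        #     q &= Q(entry_id__author_surname__icontains=request.GET['q_author_surname'])
+        # if request.GET.get('q_text'):
+        #     q &= Q(content_text__icontains=request.GET['q_text'])
+        # entries_list = Entries.objects.filter(q)
+        # ----------------------------------------------------------------------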
+        if ( ('q_author_name' in request.GET) and (request.GET['q_author_name']) ):
+            for item in Authors.objects.filter(author_name__icontains = request.GET['q_author_name']):
+                try:
+                    entries_list |= item.entries_set.all()
+                except:
+                    entries_list = item.entries_set.all()
+
+        if ( ('q_author_surname' in request.GET) and (request.GET['q_author_surname']) ):
+            for item in Authors.objects.filter(author_surname__icontains = request.GET['q_author_surname']):
+                try:
+                    entries_list |= item.entries_set.all()
+                except:
+                    entries_list = item.entries_set.all()
+
+        if ( ('q_text' in request.GET) and (request.GET['q_text']) ):
+            try:
+                entries_list |= Entries.objects.filter(content_text__icontains = request.GET['q_text'])
+            except:
+                entries_list = Entries.objects.filter(content_text__icontains = request.GET['q_text'])
+        try:
+            if not entries_list:
+                return HttpResponseRedirect(BASE_URL+"/query")
+        except:
+            return HttpResponseRedirect(BASE_URL+"/query")
+        #TODO: this response block is to be edited
+        return render_to_response('main/main.html' ,{
+            'entries_list':entries_list,
+            #'p_entries_list':p_entries_list,
+            'truncate_words':truncate_words,
+            'items_per_page':repr(items_per_page),
+            'run_time':run_time,
+            #'archive_year':archive_year,
+            #'archive_month':archive_month,
+            #'error':error,
+            'BASE_URL':BASE_URL,
+            })
+    ### If not ###
+    else:
+        # Switch to 'return the result of the arguments provided' mode.
+
+        selected_entries = Entries.objects.select_related()
+
+        # For entry categories
+        entries_list1 = selected_entries.filter(entry_id__label_personal = 1)
+        entries_list2 = selected_entries.filter(entry_id__label_lkd = 1)
+        entries_list3 = selected_entries.filter(entry_id__label_community = 1)
+        entries_list = entries_list1 | entries_list2 | entries_list3
+
+        # Validating arguments provided by urls.py.
+        if (archive_year != '') and str(archive_year).isdigit():
+            entries_list = entries_list.filter(date__year=archive_year)
+        else:
+            # Fall back to main view.
+            return HttpResponseRedirect(BASE_URL+"/main")
+
+        if (archive_month != '') and str(archive_month).isdigit():
+            entries_list = entries_list.filter(date__month=archive_month)
+
+        # Pagination
+        elements_in_a_page = 25 # How many elements are displayed in a paginator page.
+        paginator = Paginator(entries_list,elements_in_a_page)
+
+        # Validate the page number; if it is not an int, return the first page.
+        try:
+            page = int(request.GET.get('page', '1'))
+        except ValueError:
+            page = 1
+
+        # If the page request is out of range, return the last page.
+ try: + p_entries_list = paginator.page(page) + except (EmptyPage, InvalidPage): + p_entries_list = paginator.page(paginator.num_pages) + + + + + return render_to_response('main/main.html' ,{ + 'entries_list':entries_list, + 'p_entries_list':p_entries_list, + 'truncate_words':truncate_words, + 'items_per_page':repr(items_per_page), + 'run_time':run_time, + 'archive_year':archive_year, + 'archive_month':archive_month, + #'error':error, + 'BASE_URL':BASE_URL, + }) diff --git a/DJAGEN/trunk/djagen/collector/wrappers.py b/DJAGEN/trunk/djagen/collector/wrappers.py new file mode 100755 index 0000000..af35741 --- /dev/null +++ b/DJAGEN/trunk/djagen/collector/wrappers.py @@ -0,0 +1,13 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +from django.shortcuts import render_to_response +from django.template import RequestContext + +def render_response(req, *args, **kwargs): + """ + Wrapper function that automatically adds "context_instance" to render_to_response + """ + + kwargs['context_instance'] = RequestContext(req) + return render_to_response(*args, **kwargs) diff --git a/DJAGEN/trunk/djagen/gezegen/__init__.py b/DJAGEN/trunk/djagen/gezegen/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/DJAGEN/trunk/djagen/gezegen/gezegen/__init__.py b/DJAGEN/trunk/djagen/gezegen/gezegen/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/DJAGEN/trunk/djagen/gezegen/gezegen/atom.xml.tmpl b/DJAGEN/trunk/djagen/gezegen/gezegen/atom.xml.tmpl new file mode 100755 index 0000000..c444d01 --- /dev/null +++ b/DJAGEN/trunk/djagen/gezegen/gezegen/atom.xml.tmpl @@ -0,0 +1,61 @@ + + + + <TMPL_VAR name> + "/> + "/> + + + + + + xml:lang=""> + xml:lang="<TMPL_VAR title_language>"</TMPL_IF>><TMPL_VAR title ESCAPE="HTML"> + "/> + + + xml:lang=""> + + + + + + + + + + + + + + + + + + + + + <TMPL_VAR channel_title ESCAPE="HTML"> + + <TMPL_VAR channel_name ESCAPE="HTML"> + + + + + "/> + + + + + + + + + + + + + + + + diff --git a/DJAGEN/trunk/djagen/gezegen/gezegen/atom.xml.tmplc b/DJAGEN/trunk/djagen/gezegen/gezegen/atom.xml.tmplc new file mode 100755 index 0000000..4939e63 Binary files /dev/null and b/DJAGEN/trunk/djagen/gezegen/gezegen/atom.xml.tmplc differ diff --git a/DJAGEN/trunk/djagen/gezegen/gezegen/config.ini b/DJAGEN/trunk/djagen/gezegen/gezegen/config.ini new file mode 100755 index 0000000..c54fd3b --- /dev/null +++ b/DJAGEN/trunk/djagen/gezegen/gezegen/config.ini @@ -0,0 +1,42 @@ +[Planet] +name = Linux Gezegeni +link = http://gezegen.linux.org.tr +owner_name = Gezegen Ekibi +owner_email = gezegen@linux.org.tr +cache_directory = cache +new_feed_items = 1 +log_level = DEBUG +template_files = gezegen/index.html.tmpl gezegen/rss20.xml.tmpl gezegen/rss10.xml.tmpl gezegen/opml.xml.tmpl gezegen/foafroll.xml.tmpl gezegen/sidebar.html.tmpl gezegen/simple.html.tmpl gezegen/feeds.html.tmpl gezegen/atom.xml.tmpl +output_dir = www/ +# items_per_page = 15 +items_per_page = 25 +#days_per_page = 0 +feed_timeout = 20 + +# future_dates = ignore_date +# ignore_in_feed = updated + +encoding = utf-8 +locale = tr_TR.UTF-8 + +date_format = %d %b %Y @ %I:%M %p +#date_format = %B %d, %Y %I:%M %p +new_date_format = %d %B %Y + +[DEFAULT] +facewidth = 64 +faceheight = 64 + + +[http://www.hakanuygun.com/blog/?feed=atom&cat=13] +name = Hakan Uygun +nick = huygun +label = Personal +id = 1 + +[http://feeds.feedburner.com/oguzy-gezegen] +name = Oğuz Yarımtepe +face = oguzyarimtepe.png +nick = oyarimtepe +label = Personal +id = 2 diff --git a/DJAGEN/trunk/djagen/gezegen/gezegen/config_entries.xml 
b/DJAGEN/trunk/djagen/gezegen/gezegen/config_entries.xml new file mode 100755 index 0000000..f9848a4 --- /dev/null +++ b/DJAGEN/trunk/djagen/gezegen/gezegen/config_entries.xml @@ -0,0 +1,17 @@
[config_entries.xml: the XML tags were lost in extraction. Two entries remain legible, mirroring the fields configxml.py reads (feed, name, nick, label, id): feed [http://www.bugunlinux.com/?feed=rss2], name Ahmet Yıldız, nick ayildiz, id 1; and feed [http://www.bugunlinux.com/?feed=rss3], name Ahmet Yıldızz, nick ayildizz, id 2.]
diff --git a/DJAGEN/trunk/djagen/gezegen/gezegen/config_header.xml b/DJAGEN/trunk/djagen/gezegen/gezegen/config_header.xml new file mode 100755 index 0000000..949e8cf --- /dev/null +++ b/DJAGEN/trunk/djagen/gezegen/gezegen/config_header.xml @@ -0,0 +1,28 @@
[config_header.xml: tags lost in extraction. The legible values mirror config.ini's [Planet] and [DEFAULT] sections: name Linux Gezegeni, link http://gezegen.linux.org.tr, owner Gezegen Ekibi / gezegen@linux.org.tr, cache directory, log level DEBUG, the template file list, output dir www/, items_per_page 25, feed_timeout 20, encoding utf-8, locale tr_TR.UTF-8, the two date formats, and face width/height 64.]
diff --git a/DJAGEN/trunk/djagen/gezegen/gezegen/feeds.html.tmpl b/DJAGEN/trunk/djagen/gezegen/gezegen/feeds.html.tmpl new file mode 100755 index 0000000..acd9479 --- /dev/null +++ b/DJAGEN/trunk/djagen/gezegen/gezegen/feeds.html.tmpl @@ -0,0 +1,22 @@
[feeds.html.tmpl: template markup lost in extraction; nothing legible remains.]
diff --git a/DJAGEN/trunk/djagen/gezegen/gezegen/feeds.html.tmplc b/DJAGEN/trunk/djagen/gezegen/gezegen/feeds.html.tmplc new file mode 100755 index 0000000..155f4e4 Binary files /dev/null and b/DJAGEN/trunk/djagen/gezegen/gezegen/feeds.html.tmplc differ
diff --git a/DJAGEN/trunk/djagen/gezegen/gezegen/foafroll.xml.tmpl b/DJAGEN/trunk/djagen/gezegen/gezegen/foafroll.xml.tmpl new file mode 100755 index 0000000..f344738 --- /dev/null +++ b/DJAGEN/trunk/djagen/gezegen/gezegen/foafroll.xml.tmpl @@ -0,0 +1,31 @@
[foafroll.xml.tmpl: template markup lost in extraction; only stray attribute fragments remain.]
diff --git a/DJAGEN/trunk/djagen/gezegen/gezegen/foafroll.xml.tmplc b/DJAGEN/trunk/djagen/gezegen/gezegen/foafroll.xml.tmplc new file mode 100755 index 0000000..d85d57a Binary files /dev/null and b/DJAGEN/trunk/djagen/gezegen/gezegen/foafroll.xml.tmplc differ
diff --git a/DJAGEN/trunk/djagen/gezegen/gezegen/index.html.tmpl b/DJAGEN/trunk/djagen/gezegen/gezegen/index.html.tmpl new file mode 100755 index 0000000..7726f6b --- /dev/null +++ b/DJAGEN/trunk/djagen/gezegen/gezegen/index.html.tmpl @@ -0,0 +1,356 @@
[index.html.tmpl: the page title uses <TMPL_VAR name>; the remaining HTML/TMPL markup was lost in extraction, leaving only empty element residue and a few share-link attribute fragments.]
    + + + + + + + + + + + + + + diff --git a/DJAGEN/trunk/djagen/gezegen/gezegen/index.html.tmplc b/DJAGEN/trunk/djagen/gezegen/gezegen/index.html.tmplc new file mode 100755 index 0000000..259931d Binary files /dev/null and b/DJAGEN/trunk/djagen/gezegen/gezegen/index.html.tmplc differ diff --git a/DJAGEN/trunk/djagen/gezegen/gezegen/opml.xml.tmpl b/DJAGEN/trunk/djagen/gezegen/gezegen/opml.xml.tmpl new file mode 100755 index 0000000..50bbabe --- /dev/null +++ b/DJAGEN/trunk/djagen/gezegen/gezegen/opml.xml.tmpl @@ -0,0 +1,16 @@ + + + + <TMPL_VAR name> + + + + + + + + + " xmlUrl=""/> + + + diff --git a/DJAGEN/trunk/djagen/gezegen/gezegen/opml.xml.tmplc b/DJAGEN/trunk/djagen/gezegen/gezegen/opml.xml.tmplc new file mode 100755 index 0000000..f9309f9 Binary files /dev/null and b/DJAGEN/trunk/djagen/gezegen/gezegen/opml.xml.tmplc differ diff --git a/DJAGEN/trunk/djagen/gezegen/gezegen/rss10.xml.tmpl b/DJAGEN/trunk/djagen/gezegen/gezegen/rss10.xml.tmpl new file mode 100755 index 0000000..0cd709b --- /dev/null +++ b/DJAGEN/trunk/djagen/gezegen/gezegen/rss10.xml.tmpl @@ -0,0 +1,37 @@ + + +"> + <TMPL_VAR name> + + - + + + + + " /> + + + + + + +"> + <TMPL_VAR channel_name><TMPL_IF title>: <TMPL_VAR title></TMPL_IF> + + + + + + + + + + + + diff --git a/DJAGEN/trunk/djagen/gezegen/gezegen/rss10.xml.tmplc b/DJAGEN/trunk/djagen/gezegen/gezegen/rss10.xml.tmplc new file mode 100755 index 0000000..18444f3 Binary files /dev/null and b/DJAGEN/trunk/djagen/gezegen/gezegen/rss10.xml.tmplc differ diff --git a/DJAGEN/trunk/djagen/gezegen/gezegen/rss20.xml.tmpl b/DJAGEN/trunk/djagen/gezegen/gezegen/rss20.xml.tmpl new file mode 100755 index 0000000..3ff7a11 --- /dev/null +++ b/DJAGEN/trunk/djagen/gezegen/gezegen/rss20.xml.tmpl @@ -0,0 +1,30 @@ + + + + + <TMPL_VAR name> + + en + - + + + + <TMPL_VAR channel_name><TMPL_IF title>: <TMPL_VAR title></TMPL_IF> + + + + + + " align="right" width="" height="">]]> + + + + + + + + + + + + diff --git a/DJAGEN/trunk/djagen/gezegen/gezegen/rss20.xml.tmplc b/DJAGEN/trunk/djagen/gezegen/gezegen/rss20.xml.tmplc new file mode 100755 index 0000000..21f007a Binary files /dev/null and b/DJAGEN/trunk/djagen/gezegen/gezegen/rss20.xml.tmplc differ diff --git a/DJAGEN/trunk/djagen/gezegen/gezegen/sidebar.html.tmpl b/DJAGEN/trunk/djagen/gezegen/gezegen/sidebar.html.tmpl new file mode 100755 index 0000000..acfdf4c --- /dev/null +++ b/DJAGEN/trunk/djagen/gezegen/gezegen/sidebar.html.tmpl @@ -0,0 +1,17 @@ + diff --git a/DJAGEN/trunk/djagen/gezegen/gezegen/sidebar.html.tmplc b/DJAGEN/trunk/djagen/gezegen/gezegen/sidebar.html.tmplc new file mode 100755 index 0000000..50754dd Binary files /dev/null and b/DJAGEN/trunk/djagen/gezegen/gezegen/sidebar.html.tmplc differ diff --git a/DJAGEN/trunk/djagen/gezegen/gezegen/simple.html.tmpl b/DJAGEN/trunk/djagen/gezegen/gezegen/simple.html.tmpl new file mode 100755 index 0000000..2c20c6a --- /dev/null +++ b/DJAGEN/trunk/djagen/gezegen/gezegen/simple.html.tmpl @@ -0,0 +1,74 @@ + + + + + + + <TMPL_VAR name> + + + + + + + + + + + + + + + +

[simple.html.tmpl: template markup lost in extraction; only empty element residue remains.]
    + + + + + + + + diff --git a/DJAGEN/trunk/djagen/gezegen/gezegen/simple.html.tmplc b/DJAGEN/trunk/djagen/gezegen/gezegen/simple.html.tmplc new file mode 100755 index 0000000..d466e42 Binary files /dev/null and b/DJAGEN/trunk/djagen/gezegen/gezegen/simple.html.tmplc differ diff --git a/DJAGEN/trunk/djagen/gezegen/gezegen/zaman.sh b/DJAGEN/trunk/djagen/gezegen/gezegen/zaman.sh new file mode 100755 index 0000000..e0c9a2b --- /dev/null +++ b/DJAGEN/trunk/djagen/gezegen/gezegen/zaman.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +while read x +do + echo "$(date)::$x" +done diff --git a/DJAGEN/trunk/djagen/gezegen/planet-cache.py b/DJAGEN/trunk/djagen/gezegen/planet-cache.py new file mode 100755 index 0000000..9334583 --- /dev/null +++ b/DJAGEN/trunk/djagen/gezegen/planet-cache.py @@ -0,0 +1,194 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 -*- +"""Planet cache tool. + +""" + +__authors__ = [ "Scott James Remnant ", + "Jeff Waugh " ] +__license__ = "Python" + + +import os +import sys +import time +import dbhash +import ConfigParser + +import planet + + +def usage(): + print "Usage: planet-cache [options] CACHEFILE [ITEMID]..." + print + print "Examine and modify information in the Planet cache." + print + print "Channel Commands:" + print " -C, --channel Display known information on the channel" + print " -L, --list List items in the channel" + print " -K, --keys List all keys found in channel items" + print + print "Item Commands (need ITEMID):" + print " -I, --item Display known information about the item(s)" + print " -H, --hide Mark the item(s) as hidden" + print " -U, --unhide Mark the item(s) as not hidden" + print + print "Other Options:" + print " -h, --help Display this help message and exit" + sys.exit(0) + +def usage_error(msg, *args): + print >>sys.stderr, msg, " ".join(args) + print >>sys.stderr, "Perhaps you need --help ?" + sys.exit(1) + +def print_keys(item, title): + keys = item.keys() + keys.sort() + key_len = max([ len(k) for k in keys ]) + + print title + ":" + for key in keys: + if item.key_type(key) == item.DATE: + value = time.strftime(planet.TIMEFMT_ISO, item[key]) + else: + value = str(item[key]) + print " %-*s %s" % (key_len, key, fit_str(value, 74 - key_len)) + +def fit_str(string, length): + if len(string) <= length: + return string + else: + return string[:length-4] + " ..." 
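+# --- Editor sketch (not part of the original diff) ---------------------------
+# Quick sanity check of fit_str's truncation behaviour: strings longer than
+# 'length' are cut to length-4 characters plus " ...".
+if __debug__:
+    assert fit_str("short", 10) == "short"
+    assert fit_str("A very long entry title", 20) == "A very long entr ..."
+# ------------------------------------------------------------------------------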
+ + +if __name__ == "__main__": + cache_file = None + want_ids = 0 + ids = [] + + command = None + + for arg in sys.argv[1:]: + if arg == "-h" or arg == "--help": + usage() + elif arg == "-C" or arg == "--channel": + if command is not None: + usage_error("Only one command option may be supplied") + command = "channel" + elif arg == "-L" or arg == "--list": + if command is not None: + usage_error("Only one command option may be supplied") + command = "list" + elif arg == "-K" or arg == "--keys": + if command is not None: + usage_error("Only one command option may be supplied") + command = "keys" + elif arg == "-I" or arg == "--item": + if command is not None: + usage_error("Only one command option may be supplied") + command = "item" + want_ids = 1 + elif arg == "-H" or arg == "--hide": + if command is not None: + usage_error("Only one command option may be supplied") + command = "hide" + want_ids = 1 + elif arg == "-U" or arg == "--unhide": + if command is not None: + usage_error("Only one command option may be supplied") + command = "unhide" + want_ids = 1 + elif arg.startswith("-"): + usage_error("Unknown option:", arg) + else: + if cache_file is None: + cache_file = arg + elif want_ids: + ids.append(arg) + else: + usage_error("Unexpected extra argument:", arg) + + if cache_file is None: + usage_error("Missing expected cache filename") + elif want_ids and not len(ids): + usage_error("Missing expected entry ids") + + # Open the cache file directly to get the URL it represents + try: + db = dbhash.open(cache_file) + url = db["url"] + db.close() + except dbhash.bsddb._db.DBError, e: + print >>sys.stderr, cache_file + ":", e.args[1] + sys.exit(1) + except KeyError: + print >>sys.stderr, cache_file + ": Probably not a cache file" + sys.exit(1) + + # Now do it the right way :-) + my_planet = planet.Planet(ConfigParser.ConfigParser()) + my_planet.cache_directory = os.path.dirname(cache_file) + channel = planet.Channel(my_planet, url) + + for item_id in ids: + if not channel.has_item(item_id): + print >>sys.stderr, item_id + ": Not in channel" + sys.exit(1) + + # Do the user's bidding + if command == "channel": + print_keys(channel, "Channel Keys") + + elif command == "item": + for item_id in ids: + item = channel.get_item(item_id) + print_keys(item, "Item Keys for %s" % item_id) + + elif command == "list": + print "Items in Channel:" + for item in channel.items(hidden=1, sorted=1): + print " " + item.id + print " " + time.strftime(planet.TIMEFMT_ISO, item.date) + if hasattr(item, "title"): + print " " + fit_str(item.title, 70) + if hasattr(item, "hidden"): + print " (hidden)" + + elif command == "keys": + keys = {} + for item in channel.items(): + for key in item.keys(): + keys[key] = 1 + + keys = keys.keys() + keys.sort() + + print "Keys used in Channel:" + for key in keys: + print " " + key + print + + print "Use --item to output values of particular items." + + elif command == "hide": + for item_id in ids: + item = channel.get_item(item_id) + if hasattr(item, "hidden"): + print item_id + ": Already hidden." + else: + item.hidden = "yes" + + channel.cache_write() + print "Done." + + elif command == "unhide": + for item_id in ids: + item = channel.get_item(item_id) + if hasattr(item, "hidden"): + del(item.hidden) + else: + print item_id + ": Not hidden." + + channel.cache_write() + print "Done." 
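For reference, the cache tool above is driven entirely by the flags its usage() text defines; a typical session might look like the following (the cache file path is illustrative, not a project path):

+# Typical planet-cache.py session (cache path illustrative):
+#   python planet-cache.py --list cache/example.org,feed
+#   python planet-cache.py --item cache/example.org,feed <item-id>
+#   python planet-cache.py --hide cache/example.org,feed <item-id>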
diff --git a/DJAGEN/trunk/djagen/gezegen/planet.py b/DJAGEN/trunk/djagen/gezegen/planet.py new file mode 100755 index 0000000..a245a76 --- /dev/null +++ b/DJAGEN/trunk/djagen/gezegen/planet.py @@ -0,0 +1,280 @@ +#!/usr/bin/env python +"""The Planet aggregator. + +A flexible and easy-to-use aggregator for generating websites. + +Visit http://www.planetplanet.org/ for more information and to download +the latest version. + +Requires Python 2.1, recommends 2.3. +""" + +__authors__ = [ "Scott James Remnant ", + "Jeff Waugh " ] +__license__ = "Python" + +import datetime + +import os +import sys +import time +import locale +import urlparse + +import planet + +from ConfigParser import ConfigParser + +# Default configuration file path +CONFIG_FILE = "config.ini" + +# Defaults for the [Planet] config section +PLANET_NAME = "Unconfigured Planet" +PLANET_LINK = "Unconfigured Planet" +PLANET_FEED = None +OWNER_NAME = "Anonymous Coward" +OWNER_EMAIL = "" +LOG_LEVEL = "WARNING" +FEED_TIMEOUT = 20 # seconds + +# Default template file list +TEMPLATE_FILES = "examples/basic/planet.html.tmpl" + +#part for django api usage +import sys +import os +# In order to reduce integration issues, this path gets defined automatically. +sys.path.append(os.path.abspath('../..')) + +os.environ['DJANGO_SETTINGS_MODULE'] = 'djagen.settings' +from djagen.collector.models import * + +def config_get(config, section, option, default=None, raw=0, vars=None): + """Get a value from the configuration, with a default.""" + if config.has_option(section, option): + return config.get(section, option, raw=raw, vars=None) + else: + return default + +def main(): + config_file = CONFIG_FILE + offline = 0 + verbose = 0 + + for arg in sys.argv[1:]: + if arg == "-h" or arg == "--help": + print "Usage: planet [options] [CONFIGFILE]" + print + print "Options:" + print " -v, --verbose DEBUG level logging during update" + print " -o, --offline Update the Planet from the cache only" + print " -h, --help Display this help message and exit" + print + sys.exit(0) + elif arg == "-v" or arg == "--verbose": + verbose = 1 + elif arg == "-o" or arg == "--offline": + offline = 1 + elif arg.startswith("-"): + print >>sys.stderr, "Unknown option:", arg + sys.exit(1) + else: + config_file = arg + + # Read the configuration file + config = ConfigParser() + config.read(config_file) + if not config.has_section("Planet"): + print >>sys.stderr, "Configuration missing [Planet] section." 
+ sys.exit(1) + + # Read the [Planet] config section + planet_name = config_get(config, "Planet", "name", PLANET_NAME) + planet_link = config_get(config, "Planet", "link", PLANET_LINK) + planet_feed = config_get(config, "Planet", "feed", PLANET_FEED) + owner_name = config_get(config, "Planet", "owner_name", OWNER_NAME) + owner_email = config_get(config, "Planet", "owner_email", OWNER_EMAIL) + if verbose: + log_level = "DEBUG" + else: + log_level = config_get(config, "Planet", "log_level", LOG_LEVEL) + feed_timeout = config_get(config, "Planet", "feed_timeout", FEED_TIMEOUT) + template_files = config_get(config, "Planet", "template_files", + TEMPLATE_FILES).split(" ") + + # Default feed to the first feed for which there is a template + if not planet_feed: + for template_file in template_files: + name = os.path.splitext(os.path.basename(template_file))[0] + if name.find('atom')>=0 or name.find('rss')>=0: + planet_feed = urlparse.urljoin(planet_link, name) + break + + # Define locale + if config.has_option("Planet", "locale"): + # The user can specify more than one locale (separated by ":") as + # fallbacks. + locale_ok = False + for user_locale in config.get("Planet", "locale").split(':'): + user_locale = user_locale.strip() + try: + locale.setlocale(locale.LC_ALL, user_locale) + except locale.Error: + pass + else: + locale_ok = True + break + if not locale_ok: + print >>sys.stderr, "Unsupported locale setting." + sys.exit(1) + + # Activate logging + planet.logging.basicConfig() + planet.logging.getLogger().setLevel(planet.logging.getLevelName(log_level)) + log = planet.logging.getLogger("planet.runner") + try: + log.warning + except: + log.warning = log.warn + + # timeoutsocket allows feedparser to time out rather than hang forever on + # ultra-slow servers. Python 2.3 now has this functionality available in + # the standard socket library, so under 2.3 you don't need to install + # anything. But you probably should anyway, because the socket module is + # buggy and timeoutsocket is better. + if feed_timeout: + try: + feed_timeout = float(feed_timeout) + except: + log.warning("Feed timeout set to invalid value '%s', skipping", feed_timeout) + feed_timeout = None + + if feed_timeout and not offline: + try: + from planet import timeoutsocket + timeoutsocket.setDefaultSocketTimeout(feed_timeout) + log.debug("Socket timeout set to %d seconds", feed_timeout) + except ImportError: + import socket + if hasattr(socket, 'setdefaulttimeout'): + log.debug("timeoutsocket not found, using python function") + socket.setdefaulttimeout(feed_timeout) + log.debug("Socket timeout set to %d seconds", feed_timeout) + else: + log.error("Unable to set timeout to %d seconds", feed_timeout) + + # run the planet + my_planet = planet.Planet(config) + my_planet.run(planet_name, planet_link, template_files, offline) + + + + ## This is where archiving is done! 
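+    # --- Editor sketch (not part of the original diff) -----------------------
+    # The loop below hand-rolls an update-or-create per channel. The same
+    # pattern, condensed; the helper is hypothetical and unused, and the field
+    # names come from djagen.collector.models.Authors.
+    def _upsert_author_sketch(author_id, **fields):
+        try:
+            author = Authors.objects.get(author_id=author_id)
+            for k, v in fields.items():
+                setattr(author, k, v)                     # update existing row
+        except Authors.DoesNotExist:
+            author = Authors(author_id=author_id, **fields)   # first sighting
+        author.save()
+        return author
+    # --------------------------------------------------------------------------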
## + #add the current channels to the db + channels = my_planet.channels() + for channel in channels: + + author_name = channel.name + + try: + author_face = channel.face + except: + author_face = None + try: + channel_subtitle = channel.subtitle + except: + channel_subtitle = None + try: + channel_title = channel.title + except: + channel_title = None + + channel_url = channel.url + + try: + channel_link = channel.link + except: + channel_link = None + + try: + channel_urlstatus = channel.url_status + except: + channel_urlstatus = None + + label = channel.label + + label_personal = 0 + label_lkd = 0 + label_community = 0 + label_eng = 0 + if label == "Personal": + label_personal = 1 + if label == "LKD": + label_lkd = 1 + if label == "Community": + label_community = 1 + if label == "Eng": + label_eng = 1 + + id = channel.id + + try: + author = Authors.objects.get(author_id=id) + + #update the values with the ones at the config file + author.author_name = author_name + #print author_name + author.author_face = author_face + author.channel_subtitle = channel_subtitle + author.channel_title = channel_title + author.channel_url = channel_url + author.channel_link = channel_link + author.channel_url_status = channel_urlstatus + author.label_personal = label_personal + author.label_lkd = label_lkd + author.label_community = label_community + author.label_eng = label_eng + + except Exception, ex: + #print ex + author = Authors(author_id=id, author_name=author_name, author_face=author_face, channel_subtitle=channel_subtitle, channel_title=channel_title, channel_url=channel_url, channel_link=channel_link, channel_urlstatus=channel_urlstatus, label_personal=label_personal, label_lkd=label_lkd, label_community=label_community, label_eng=label_eng) + + + author.save() + + #entry issues + items = channel.items() + for item in items: + id_hash = item.id_hash + + try: + entry = author.entries_set.get(id_hash = id_hash) + entry.title = item.title + entry.content_html = item.content + entry.content_text = entry.sanitize(item.content) + entry.summary = item.summary + entry.link = item.link + d = item.date + entry.date = datetime.datetime(d[0], d[1], d[2], d[3], d[4], d[5]) + except: + content_html = item.content + #content_text = entry.sanitize(content_html) + d = item.date + if not item.has_key('summary'): summary = None + else: summary = item.summary + entry = author.entries_set.create(id_hash=id_hash, title=item.title, content_html=item.content, summary=summary, link=item.link, date=datetime.datetime(d[0], d[1], d[2], d[3], d[4], d[5])) + entry.content_text = entry.sanitize(content_html) + + entry.save() + + #datetime issue + r = RunTime() + r.save() + + my_planet.generate_all_files(template_files, planet_name, + planet_link, planet_feed, owner_name, owner_email) + + +if __name__ == "__main__": + main() + diff --git a/DJAGEN/trunk/djagen/gezegen/planet/__init__.py b/DJAGEN/trunk/djagen/gezegen/planet/__init__.py new file mode 100755 index 0000000..7829731 --- /dev/null +++ b/DJAGEN/trunk/djagen/gezegen/planet/__init__.py @@ -0,0 +1,969 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 -*- +"""Planet aggregator library. + +This package is a library for developing web sites or software that +aggregate RSS, CDF and Atom feeds taken from elsewhere into a single, +combined feed. 
+""" + +__version__ = "2.0" +__authors__ = [ "Scott James Remnant ", + "Jeff Waugh " ] +__license__ = "Python" + +import locale + +# Modules available without separate import +import cache +import feedparser +import sanitize +import htmltmpl +import sgmllib +try: + import logging +except: + import compat_logging as logging + +# Limit the effect of "from planet import *" +__all__ = ("cache", "feedparser", "htmltmpl", "logging", + "Planet", "Channel", "NewsItem") + + +import os +import md5 +import time +import dbhash +import re + +try: + from xml.sax.saxutils import escape +except: + def escape(data): + return data.replace("&","&").replace(">",">").replace("<","<") + +# Version information (for generator headers) +VERSION = ("Planet/%s +http://www.planetplanet.org" % __version__) + +# Default User-Agent header to send when retreiving feeds +USER_AGENT = VERSION + " " + feedparser.USER_AGENT + +# Default cache directory +CACHE_DIRECTORY = "cache" + +# Default number of items to display from a new feed +NEW_FEED_ITEMS = 10 + +# Useful common date/time formats +TIMEFMT_ISO = "%Y-%m-%dT%H:%M:%S+00:00" +TIMEFMT_822 = "%a, %d %b %Y %H:%M:%S +0000" + + +# Log instance to use here +log = logging.getLogger("planet") +try: + log.warning +except: + log.warning = log.warn + +# Defaults for the template file config sections +ENCODING = "utf-8" +ITEMS_PER_PAGE = 60 +DAYS_PER_PAGE = 0 +OUTPUT_DIR = "output" +DATE_FORMAT = "%B %d, %Y %I:%M %p" +NEW_DATE_FORMAT = "%B %d, %Y" +ACTIVITY_THRESHOLD = 0 + +class stripHtml(sgmllib.SGMLParser): + "remove all tags from the data" + def __init__(self, data): + sgmllib.SGMLParser.__init__(self) + self.result='' + self.feed(data) + self.close() + def handle_data(self, data): + if data: self.result+=data + +def template_info(item, date_format): + """Produce a dictionary of template information.""" + info = {} + + #set the locale so that the dates at the feeds will be in english + lc=locale.getlocale() + if lc[0] == None: + try: + locale.setlocale(locale.LC_ALL, '') + except: + pass + elif lc[0].find("tr") != -1: + try: + locale.setlocale(locale.LC_ALL, '') + except: + pass + + for key in item.keys(): + if item.key_type(key) == item.DATE: + date = item.get_as_date(key) + info[key] = time.strftime(date_format, date) + info[key + "_iso"] = time.strftime(TIMEFMT_ISO, date) + info[key + "_822"] = time.strftime(TIMEFMT_822, date) + else: + info[key] = item[key] + if 'title' in item.keys(): + info['title_plain'] = stripHtml(info['title']).result + + return info + + +class Planet: + """A set of channels. + + This class represents a set of channels for which the items will + be aggregated together into one combined feed. + + Properties: + user_agent User-Agent header to fetch feeds with. + cache_directory Directory to store cached channels in. + new_feed_items Number of items to display from a new feed. + filter A regular expression that articles must match. + exclude A regular expression that articles must not match. 
+ """ + def __init__(self, config): + self.config = config + + self._channels = [] + + self.user_agent = USER_AGENT + self.cache_directory = CACHE_DIRECTORY + self.new_feed_items = NEW_FEED_ITEMS + self.filter = None + self.exclude = None + + def tmpl_config_get(self, template, option, default=None, raw=0, vars=None): + """Get a template value from the configuration, with a default.""" + if self.config.has_option(template, option): + return self.config.get(template, option, raw=raw, vars=None) + elif self.config.has_option("Planet", option): + return self.config.get("Planet", option, raw=raw, vars=None) + else: + return default + + def gather_channel_info(self, template_file="Planet"): + date_format = self.tmpl_config_get(template_file, + "date_format", DATE_FORMAT, raw=1) + + activity_threshold = int(self.tmpl_config_get(template_file, + "activity_threshold", + ACTIVITY_THRESHOLD)) + + if activity_threshold: + activity_horizon = \ + time.gmtime(time.time()-86400*activity_threshold) + else: + activity_horizon = 0 + + channels = {} + channels_list = [] + for channel in self.channels(hidden=1): + channels[channel] = template_info(channel, date_format) + channels_list.append(channels[channel]) + + # identify inactive feeds + if activity_horizon: + latest = channel.items(sorted=1) + if len(latest)==0 or latest[0].date < activity_horizon: + channels[channel]["message"] = \ + "no activity in %d days" % activity_threshold + + # report channel level errors + if not channel.url_status: continue + status = int(channel.url_status) + if status == 403: + channels[channel]["message"] = "403: forbidden" + elif status == 404: + channels[channel]["message"] = "404: not found" + elif status == 408: + channels[channel]["message"] = "408: request timeout" + elif status == 410: + channels[channel]["message"] = "410: gone" + elif status == 500: + channels[channel]["message"] = "internal server error" + elif status >= 400: + channels[channel]["message"] = "http status %s" % status + + return channels, channels_list + + def gather_items_info(self, channels, template_file="Planet", channel_list=None): + items_list = [] + prev_date = [] + prev_channel = None + + date_format = self.tmpl_config_get(template_file, + "date_format", DATE_FORMAT, raw=1) + items_per_page = int(self.tmpl_config_get(template_file, + "items_per_page", ITEMS_PER_PAGE)) + days_per_page = int(self.tmpl_config_get(template_file, + "days_per_page", DAYS_PER_PAGE)) + new_date_format = self.tmpl_config_get(template_file, + "new_date_format", NEW_DATE_FORMAT, raw=1) + + for newsitem in self.items(max_items=items_per_page, + max_days=days_per_page, + channels=channel_list): + item_info = template_info(newsitem, date_format) + chan_info = channels[newsitem._channel] + for k, v in chan_info.items(): + item_info["channel_" + k] = v + + # Check for the start of a new day + if prev_date[:3] != newsitem.date[:3]: + prev_date = newsitem.date + item_info["new_date"] = time.strftime(new_date_format, + newsitem.date) + + # Check for the start of a new channel + if item_info.has_key("new_date") \ + or prev_channel != newsitem._channel: + prev_channel = newsitem._channel + item_info["new_channel"] = newsitem._channel.url + + items_list.append(item_info) + + return items_list + + def run(self, planet_name, planet_link, template_files, offline = False): + log = logging.getLogger("planet.runner") + + # Create a planet + log.info("Loading cached data") + if self.config.has_option("Planet", "cache_directory"): + self.cache_directory = self.config.get("Planet", 
"cache_directory") + if self.config.has_option("Planet", "new_feed_items"): + self.new_feed_items = int(self.config.get("Planet", "new_feed_items")) + self.user_agent = "%s +%s %s" % (planet_name, planet_link, + self.user_agent) + if self.config.has_option("Planet", "filter"): + self.filter = self.config.get("Planet", "filter") + + # The other configuration blocks are channels to subscribe to + for feed_url in self.config.sections(): + if feed_url == "Planet" or feed_url in template_files: + continue + log.info(feed_url) + # Create a channel, configure it and subscribe it + channel = Channel(self, feed_url) + self.subscribe(channel) + + # Update it + try: + if not offline and not channel.url_status == '410': + channel.update() + except KeyboardInterrupt: + raise + except: + log.exception("Update of <%s> failed", feed_url) + + def generate_all_files(self, template_files, planet_name, + planet_link, planet_feed, owner_name, owner_email): + + log = logging.getLogger("planet.runner") + # Go-go-gadget-template + for template_file in template_files: + manager = htmltmpl.TemplateManager() + log.info("Processing template %s", template_file) + try: + template = manager.prepare(template_file) + except htmltmpl.TemplateError: + template = manager.prepare(os.path.basename(template_file)) + # Read the configuration + output_dir = self.tmpl_config_get(template_file, + "output_dir", OUTPUT_DIR) + date_format = self.tmpl_config_get(template_file, + "date_format", DATE_FORMAT, raw=1) + encoding = self.tmpl_config_get(template_file, "encoding", ENCODING) + + # We treat each template individually + base = os.path.splitext(os.path.basename(template_file))[0] + url = os.path.join(planet_link, base) + output_file = os.path.join(output_dir, base) + + # Gather information + channels, channels_list = self.gather_channel_info(template_file) + items_list = self.gather_items_info(channels, template_file) + + # Gather item information + + # Process the template + tp = htmltmpl.TemplateProcessor(html_escape=0) + tp.set("Items", items_list) + tp.set("Channels", channels_list) + + # Generic information + tp.set("generator", VERSION) + tp.set("name", planet_name) + tp.set("link", planet_link) + tp.set("owner_name", owner_name) + tp.set("owner_email", owner_email) + tp.set("url", url) + + if planet_feed: + tp.set("feed", planet_feed) + tp.set("feedtype", planet_feed.find('rss')>=0 and 'rss' or 'atom') + + # Update time + date = time.localtime() + tp.set("date", time.strftime(date_format, date)) + tp.set("date_iso", time.strftime(TIMEFMT_ISO, date)) + tp.set("date_822", time.strftime(TIMEFMT_822, date)) + + try: + log.info("Writing %s", output_file) + output_fd = open(output_file, "w") + if encoding.lower() in ("utf-8", "utf8"): + # UTF-8 output is the default because we use that internally + output_fd.write(tp.process(template)) + elif encoding.lower() in ("xml", "html", "sgml"): + # Magic for Python 2.3 users + output = tp.process(template).decode("utf-8") + output_fd.write(output.encode("ascii", "xmlcharrefreplace")) + else: + # Must be a "known" encoding + output = tp.process(template).decode("utf-8") + output_fd.write(output.encode(encoding, "replace")) + output_fd.close() + except KeyboardInterrupt: + raise + except: + log.exception("Write of %s failed", output_file) + + def channels(self, hidden=0, sorted=1): + """Return the list of channels.""" + channels = [] + for channel in self._channels: + if hidden or not channel.has_key("hidden"): + channels.append((channel.name, channel)) + + if sorted: + channels.sort() + + 
return [ c[-1] for c in channels ]
+
+ def find_by_basename(self, basename):
+ for channel in self._channels:
+ if basename == channel.cache_basename(): return channel
+
+ def subscribe(self, channel):
+ """Subscribe the planet to the channel."""
+ self._channels.append(channel)
+
+ def unsubscribe(self, channel):
+ """Unsubscribe the planet from the channel."""
+ self._channels.remove(channel)
+
+ def items(self, hidden=0, sorted=1, max_items=0, max_days=0, channels=None):
+ """Return an optionally filtered list of items in the channel.
+
+ The filters are applied in the following order:
+
+ If hidden is true then items in hidden channels and hidden items
+ will be returned.
+
+ If sorted is true then the item list will be sorted with the newest
+ first.
+
+ If max_items is non-zero then this number of items, at most, will
+ be returned.
+
+ If max_days is non-zero then any items older than the newest by
+ this number of days won't be returned. Requires sorted=1 to work.
+
+
+ The sharp-eyed will note that this looks a little strange code-wise;
+ it turns out that Python gets *really* slow if we try to sort the
+ actual items themselves. Also we use mktime here, but it's ok
+ because we discard the numbers and just need them to be relatively
+ consistent between each other.
+ """
+ planet_filter_re = None
+ if self.filter:
+ planet_filter_re = re.compile(self.filter, re.I)
+ planet_exclude_re = None
+ if self.exclude:
+ planet_exclude_re = re.compile(self.exclude, re.I)
+
+ items = []
+ seen_guids = {}
+ if not channels: channels=self.channels(hidden=hidden, sorted=0)
+ for channel in channels:
+ for item in channel._items.values():
+ if hidden or not item.has_key("hidden"):
+
+ channel_filter_re = None
+ if channel.filter:
+ channel_filter_re = re.compile(channel.filter,
+ re.I)
+ channel_exclude_re = None
+ if channel.exclude:
+ channel_exclude_re = re.compile(channel.exclude,
+ re.I)
+ if (planet_filter_re or planet_exclude_re \
+ or channel_filter_re or channel_exclude_re):
+ title = ""
+ if item.has_key("title"):
+ title = item.title
+ content = item.get_content("content")
+
+ if planet_filter_re:
+ if not (planet_filter_re.search(title) \
+ or planet_filter_re.search(content)):
+ continue
+
+ if planet_exclude_re:
+ if (planet_exclude_re.search(title) \
+ or planet_exclude_re.search(content)):
+ continue
+
+ if channel_filter_re:
+ if not (channel_filter_re.search(title) \
+ or channel_filter_re.search(content)):
+ continue
+
+ if channel_exclude_re:
+ if (channel_exclude_re.search(title) \
+ or channel_exclude_re.search(content)):
+ continue
+
+ if not seen_guids.has_key(item.id):
+ seen_guids[item.id] = 1
+ items.append((time.mktime(item.date), item.order, item))
+
+ # Sort the list
+ if sorted:
+ items.sort()
+ items.reverse()
+
+ # Apply max_items filter
+ if len(items) and max_items:
+ items = items[:max_items]
+
+ # Apply max_days filter
+ if len(items) and max_days:
+ max_count = 0
+ max_time = items[0][0] - max_days * 86400
+ for item in items:
+ if item[0] > max_time:
+ max_count += 1
+ else:
+ items = items[:max_count]
+ break
+
+ return [ i[-1] for i in items ]
+
+class Channel(cache.CachedInfo):
+ """A list of news items.
+
+ This class represents a list of news items taken from the feed of
+ a website or other source.
+
+ Properties:
+ url URL of the feed.
+ url_etag E-Tag of the feed URL.
+ url_modified Last modified time of the feed URL.
+ url_status Last HTTP status of the feed URL.
+ hidden Channel should be hidden (True if exists).
+ name Name of the feed owner, or feed title. + next_order Next order number to be assigned to NewsItem + + updated Correct UTC-Normalised update time of the feed. + last_updated Correct UTC-Normalised time the feed was last updated. + + id An identifier the feed claims is unique (*). + title One-line title (*). + link Link to the original format feed (*). + tagline Short description of the feed (*). + info Longer description of the feed (*). + + modified Date the feed claims to have been modified (*). + + author Name of the author (*). + publisher Name of the publisher (*). + generator Name of the feed generator (*). + category Category name (*). + copyright Copyright information for humans to read (*). + license Link to the licence for the content (*). + docs Link to the specification of the feed format (*). + language Primary language (*). + errorreportsto E-Mail address to send error reports to (*). + + image_url URL of an associated image (*). + image_link Link to go with the associated image (*). + image_title Alternative text of the associated image (*). + image_width Width of the associated image (*). + image_height Height of the associated image (*). + + filter A regular expression that articles must match. + exclude A regular expression that articles must not match. + + Properties marked (*) will only be present if the original feed + contained them. Note that the optional 'modified' date field is simply + a claim made by the item and parsed from the information given, 'updated' + (and 'last_updated') are far more reliable sources of information. + + Some feeds may define additional properties to those above. + """ + IGNORE_KEYS = ("links", "contributors", "textinput", "cloud", "categories", + "url", "href", "url_etag", "url_modified", "tags", "itunes_explicit") + + def __init__(self, planet, url): + if not os.path.isdir(planet.cache_directory): + os.makedirs(planet.cache_directory) + cache_filename = cache.filename(planet.cache_directory, url) + cache_file = dbhash.open(cache_filename, "c", 0666) + + cache.CachedInfo.__init__(self, cache_file, url, root=1) + + self._items = {} + self._planet = planet + self._expired = [] + self.url = url + # retain the original URL for error reporting + self.configured_url = url + self.url_etag = None + self.url_status = None + self.url_modified = None + self.name = None + self.updated = None + self.last_updated = None + self.filter = None + self.exclude = None + self.next_order = "0" + self.cache_read() + self.cache_read_entries() + + if planet.config.has_section(url): + for option in planet.config.options(url): + value = planet.config.get(url, option) + self.set_as_string(option, value, cached=0) + + def has_item(self, id_): + """Check whether the item exists in the channel.""" + return self._items.has_key(id_) + + def get_item(self, id_): + """Return the item from the channel.""" + return self._items[id_] + + # Special methods + __contains__ = has_item + + def items(self, hidden=0, sorted=0): + """Return the item list.""" + items = [] + for item in self._items.values(): + if hidden or not item.has_key("hidden"): + items.append((time.mktime(item.date), item.order, item)) + + if sorted: + items.sort() + items.reverse() + + return [ i[-1] for i in items ] + + def __iter__(self): + """Iterate the sorted item list.""" + return iter(self.items(sorted=1)) + + def cache_read_entries(self): + """Read entry information from the cache.""" + keys = self._cache.keys() + for key in keys: + if key.find(" ") != -1: continue + if self.has_key(key): continue + 
+ item = NewsItem(self, key) + self._items[key] = item + + def cache_basename(self): + return cache.filename('',self._id) + + def cache_write(self, sync=1): + + """Write channel and item information to the cache.""" + for item in self._items.values(): + item.cache_write(sync=0) + for item in self._expired: + item.cache_clear(sync=0) + cache.CachedInfo.cache_write(self, sync) + + self._expired = [] + + def feed_information(self): + """ + Returns a description string for the feed embedded in this channel. + + This will usually simply be the feed url embedded in <>, but in the + case where the current self.url has changed from the original + self.configured_url the string will contain both pieces of information. + This is so that the URL in question is easier to find in logging + output: getting an error about a URL that doesn't appear in your config + file is annoying. + """ + if self.url == self.configured_url: + return "<%s>" % self.url + else: + return "<%s> (formerly <%s>)" % (self.url, self.configured_url) + + def update(self): + """Download the feed to refresh the information. + + This does the actual work of pulling down the feed and if it changes + updates the cached information about the feed and entries within it. + """ + info = feedparser.parse(self.url, + etag=self.url_etag, modified=self.url_modified, + agent=self._planet.user_agent) + if info.has_key("status"): + self.url_status = str(info.status) + elif info.has_key("entries") and len(info.entries)>0: + self.url_status = str(200) + elif info.bozo and info.bozo_exception.__class__.__name__=='Timeout': + self.url_status = str(408) + else: + self.url_status = str(500) + + if self.url_status == '301' and \ + (info.has_key("entries") and len(info.entries)>0): + log.warning("Feed has moved from <%s> to <%s>", self.url, info.url) + try: + os.link(cache.filename(self._planet.cache_directory, self.url), + cache.filename(self._planet.cache_directory, info.url)) + except: + pass + self.url = info.url + elif self.url_status == '304': + log.info("Feed %s unchanged", self.feed_information()) + return + elif self.url_status == '410': + log.info("Feed %s gone", self.feed_information()) + self.cache_write() + return + elif self.url_status == '408': + log.warning("Feed %s timed out", self.feed_information()) + return + elif int(self.url_status) >= 400: + log.error("Error %s while updating feed %s", + self.url_status, self.feed_information()) + return + else: + log.info("Updating feed %s", self.feed_information()) + + self.url_etag = info.has_key("etag") and info.etag or None + self.url_modified = info.has_key("modified") and info.modified or None + if self.url_etag is not None: + log.debug("E-Tag: %s", self.url_etag) + if self.url_modified is not None: + log.debug("Last Modified: %s", + time.strftime(TIMEFMT_ISO, self.url_modified)) + + self.update_info(info.feed) + self.update_entries(info.entries) + self.cache_write() + + def update_info(self, feed): + """Update information from the feed. + + This reads the feed information supplied by feedparser and updates + the cached information about the feed. These are the various + potentially interesting properties that you might care about. 
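update() above leans on feedparser's conditional-GET support: the cached ETag and Last-Modified values are passed back on the next fetch, and an HTTP 304 means the cached items can be kept as-is. A hedged sketch of that round trip (example.org is a placeholder, and this needs network access plus the feedparser package):

```python
import feedparser

url = "http://example.org/feed.xml"  # hypothetical feed
d = feedparser.parse(url, etag=None, modified=None, agent="Planet/2.0")

status = getattr(d, "status", None)  # absent for local files or parse failures
if status == 304:
    print("Feed unchanged; keep cached items")
else:
    print(status, len(d.entries))
```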
+ """ + for key in feed.keys(): + if key in self.IGNORE_KEYS or key + "_parsed" in self.IGNORE_KEYS: + # Ignored fields + pass + elif feed.has_key(key + "_parsed"): + # Ignore unparsed date fields + pass + elif key.endswith("_detail"): + # retain name and email sub-fields + if feed[key].has_key('name') and feed[key].name: + self.set_as_string(key.replace("_detail","_name"), \ + feed[key].name) + if feed[key].has_key('email') and feed[key].email: + self.set_as_string(key.replace("_detail","_email"), \ + feed[key].email) + elif key == "items": + # Ignore items field + pass + elif key.endswith("_parsed"): + # Date fields + if feed[key] is not None: + self.set_as_date(key[:-len("_parsed")], feed[key]) + elif key == "image": + # Image field: save all the information + if feed[key].has_key("url"): + self.set_as_string(key + "_url", feed[key].url) + if feed[key].has_key("link"): + self.set_as_string(key + "_link", feed[key].link) + if feed[key].has_key("title"): + self.set_as_string(key + "_title", feed[key].title) + if feed[key].has_key("width"): + self.set_as_string(key + "_width", str(feed[key].width)) + if feed[key].has_key("height"): + self.set_as_string(key + "_height", str(feed[key].height)) + elif isinstance(feed[key], (str, unicode)): + # String fields + try: + detail = key + '_detail' + if feed.has_key(detail) and feed[detail].has_key('type'): + if feed[detail].type == 'text/html': + feed[key] = sanitize.HTML(feed[key]) + elif feed[detail].type == 'text/plain': + feed[key] = escape(feed[key]) + self.set_as_string(key, feed[key]) + except KeyboardInterrupt: + raise + except: + log.exception("Ignored '%s' of <%s>, unknown format", + key, self.url) + + def update_entries(self, entries): + """Update entries from the feed. + + This reads the entries supplied by feedparser and updates the + cached information about them. It's at this point we update + the 'updated' timestamp and keep the old one in 'last_updated', + these provide boundaries for acceptable entry times. + + If this is the first time a feed has been updated then most of the + items will be marked as hidden, according to Planet.new_feed_items. + + If the feed does not contain items which, according to the sort order, + should be there; those items are assumed to have been expired from + the feed or replaced and are removed from the cache. 
+ """ + if not len(entries): + return + + self.last_updated = self.updated + self.updated = time.gmtime() + + new_items = [] + feed_items = [] + for entry in entries: + # Try really hard to find some kind of unique identifier + if entry.has_key("id"): + entry_id = cache.utf8(entry.id) + elif entry.has_key("link"): + entry_id = cache.utf8(entry.link) + elif entry.has_key("title"): + entry_id = (self.url + "/" + + md5.new(cache.utf8(entry.title)).hexdigest()) + elif entry.has_key("summary"): + entry_id = (self.url + "/" + + md5.new(cache.utf8(entry.summary)).hexdigest()) + else: + log.error("Unable to find or generate id, entry ignored") + continue + + # Create the item if necessary and update + if self.has_item(entry_id): + item = self._items[entry_id] + else: + item = NewsItem(self, entry_id) + self._items[entry_id] = item + new_items.append(item) + item.update(entry) + feed_items.append(entry_id) + + # Hide excess items the first time through + if self.last_updated is None and self._planet.new_feed_items \ + and len(feed_items) > self._planet.new_feed_items: + item.hidden = "yes" + log.debug("Marked <%s> as hidden (new feed)", entry_id) + + # Assign order numbers in reverse + new_items.reverse() + for item in new_items: + item.order = self.next_order = str(int(self.next_order) + 1) + + # Check for expired or replaced items + feed_count = len(feed_items) + log.debug("Items in Feed: %d", feed_count) + for item in self.items(sorted=1): + if feed_count < 1: + break + elif item.id in feed_items: + feed_count -= 1 + elif item._channel.url_status != '226': + del(self._items[item.id]) + self._expired.append(item) + log.debug("Removed expired or replaced item <%s>", item.id) + + def get_name(self, key): + """Return the key containing the name.""" + for key in ("name", "title"): + if self.has_key(key) and self.key_type(key) != self.NULL: + return self.get_as_string(key) + + return "" + +class NewsItem(cache.CachedInfo): + """An item of news. + + This class represents a single item of news on a channel. They're + created by members of the Channel class and accessible through it. + + Properties: + id Channel-unique identifier for this item. + id_hash Relatively short, printable cryptographic hash of id + date Corrected UTC-Normalised update time, for sorting. + order Order in which items on the same date can be sorted. + hidden Item should be hidden (True if exists). + + title One-line title (*). + link Link to the original format text (*). + summary Short first-page summary (*). + content Full HTML content. + + modified Date the item claims to have been modified (*). + issued Date the item claims to have been issued (*). + created Date the item claims to have been created (*). + expired Date the item claims to expire (*). + + author Name of the author (*). + publisher Name of the publisher (*). + category Category name (*). + comments Link to a page to enter comments (*). + license Link to the licence for the content (*). + source_name Name of the original source of this item (*). + source_link Link to the original source of this item (*). + + Properties marked (*) will only be present if the original feed + contained them. Note that the various optional date fields are + simply claims made by the item and parsed from the information + given, 'date' is a far more reliable source of information. + + Some feeds may define additional properties to those above. 
+ """ + IGNORE_KEYS = ("categories", "contributors", "enclosures", "links", + "guidislink", "date", "tags") + + def __init__(self, channel, id_): + cache.CachedInfo.__init__(self, channel._cache, id_) + + self._channel = channel + self.id = id_ + self.id_hash = md5.new(id_).hexdigest() + self.date = None + self.order = None + self.content = None + self.cache_read() + + def update(self, entry): + """Update the item from the feedparser entry given.""" + for key in entry.keys(): + if key in self.IGNORE_KEYS or key + "_parsed" in self.IGNORE_KEYS: + # Ignored fields + pass + elif entry.has_key(key + "_parsed"): + # Ignore unparsed date fields + pass + elif key.endswith("_detail"): + # retain name, email, and language sub-fields + if entry[key].has_key('name') and entry[key].name: + self.set_as_string(key.replace("_detail","_name"), \ + entry[key].name) + if entry[key].has_key('email') and entry[key].email: + self.set_as_string(key.replace("_detail","_email"), \ + entry[key].email) + if entry[key].has_key('language') and entry[key].language and \ + (not self._channel.has_key('language') or \ + entry[key].language != self._channel.language): + self.set_as_string(key.replace("_detail","_language"), \ + entry[key].language) + elif key.endswith("_parsed"): + # Date fields + if entry[key] is not None: + self.set_as_date(key[:-len("_parsed")], entry[key]) + elif key == "source": + # Source field: save both url and value + if entry[key].has_key("value"): + self.set_as_string(key + "_name", entry[key].value) + if entry[key].has_key("url"): + self.set_as_string(key + "_link", entry[key].url) + elif key == "content": + # Content field: concatenate the values + value = "" + for item in entry[key]: + if item.type == 'text/html': + item.value = sanitize.HTML(item.value) + elif item.type == 'text/plain': + item.value = escape(item.value) + if item.has_key('language') and item.language and \ + (not self._channel.has_key('language') or + item.language != self._channel.language) : + self.set_as_string(key + "_language", item.language) + value += cache.utf8(item.value) + self.set_as_string(key, value) + elif isinstance(entry[key], (str, unicode)): + # String fields + try: + detail = key + '_detail' + if entry.has_key(detail): + if entry[detail].has_key('type'): + if entry[detail].type == 'text/html': + entry[key] = sanitize.HTML(entry[key]) + elif entry[detail].type == 'text/plain': + entry[key] = escape(entry[key]) + self.set_as_string(key, entry[key]) + except KeyboardInterrupt: + raise + except: + log.exception("Ignored '%s' of <%s>, unknown format", + key, self.id) + + # Generate the date field if we need to + self.get_date("date") + + def get_date(self, key): + """Get (or update) the date key. + + We check whether the date the entry claims to have been changed is + since we last updated this feed and when we pulled the feed off the + site. + + If it is then it's probably not bogus, and we'll sort accordingly. + + If it isn't then we bound it appropriately, this ensures that + entries appear in posting sequence but don't overlap entries + added in previous updates and don't creep into the next one. 
+ """ + + for other_key in ("updated", "modified", "published", "issued", "created"): + if self.has_key(other_key): + date = self.get_as_date(other_key) + break + else: + date = None + + if date is not None: + if date > self._channel.updated: + date = self._channel.updated +# elif date < self._channel.last_updated: +# date = self._channel.updated + elif self.has_key(key) and self.key_type(key) != self.NULL: + return self.get_as_date(key) + else: + date = self._channel.updated + + self.set_as_date(key, date) + return date + + def get_content(self, key): + """Return the key containing the content.""" + for key in ("content", "tagline", "summary"): + if self.has_key(key) and self.key_type(key) != self.NULL: + return self.get_as_string(key) + + return "" diff --git a/DJAGEN/trunk/djagen/gezegen/planet/atomstyler.py b/DJAGEN/trunk/djagen/gezegen/planet/atomstyler.py new file mode 100755 index 0000000..9220702 --- /dev/null +++ b/DJAGEN/trunk/djagen/gezegen/planet/atomstyler.py @@ -0,0 +1,124 @@ +from xml.dom import minidom, Node +from urlparse import urlparse, urlunparse +from xml.parsers.expat import ExpatError +from htmlentitydefs import name2codepoint +import re + +# select and apply an xml:base for this entry +class relativize: + def __init__(self, parent): + self.score = {} + self.links = [] + self.collect_and_tally(parent) + self.base = self.select_optimal_base() + if self.base: + if not parent.hasAttribute('xml:base'): + self.rebase(parent) + parent.setAttribute('xml:base', self.base) + + # collect and tally cite, href and src attributes + def collect_and_tally(self,parent): + uri = None + if parent.hasAttribute('cite'): uri=parent.getAttribute('cite') + if parent.hasAttribute('href'): uri=parent.getAttribute('href') + if parent.hasAttribute('src'): uri=parent.getAttribute('src') + + if uri: + parts=urlparse(uri) + if parts[0].lower() == 'http': + parts = (parts[1]+parts[2]).split('/') + base = None + for i in range(1,len(parts)): + base = tuple(parts[0:i]) + self.score[base] = self.score.get(base,0) + len(base) + if base and base not in self.links: self.links.append(base) + + for node in parent.childNodes: + if node.nodeType == Node.ELEMENT_NODE: + self.collect_and_tally(node) + + # select the xml:base with the highest score + def select_optimal_base(self): + if not self.score: return None + for link in self.links: + self.score[link] = 0 + winner = max(self.score.values()) + if not winner: return None + for key in self.score.keys(): + if self.score[key] == winner: + if winner == len(key): return None + return urlunparse(('http', key[0], '/'.join(key[1:]), '', '', '')) + '/' + + # rewrite cite, href and src attributes using this base + def rebase(self,parent): + uri = None + if parent.hasAttribute('cite'): uri=parent.getAttribute('cite') + if parent.hasAttribute('href'): uri=parent.getAttribute('href') + if parent.hasAttribute('src'): uri=parent.getAttribute('src') + if uri and uri.startswith(self.base): + uri = uri[len(self.base):] or '.' 
+ if parent.hasAttribute('href'): uri=parent.setAttribute('href', uri) + if parent.hasAttribute('src'): uri=parent.setAttribute('src', uri) + + for node in parent.childNodes: + if node.nodeType == Node.ELEMENT_NODE: + self.rebase(node) + +# convert type="html" to type="plain" or type="xhtml" as appropriate +def retype(parent): + for node in parent.childNodes: + if node.nodeType == Node.ELEMENT_NODE: + + if node.hasAttribute('type') and node.getAttribute('type') == 'html': + if len(node.childNodes)==0: + node.removeAttribute('type') + elif len(node.childNodes)==1: + + # replace html entity defs with utf-8 + chunks=re.split('&(\w+);', node.childNodes[0].nodeValue) + for i in range(1,len(chunks),2): + if chunks[i] in ['amp', 'lt', 'gt', 'apos', 'quot']: + chunks[i] ='&' + chunks[i] +';' + elif chunks[i] in name2codepoint: + chunks[i]=unichr(name2codepoint[chunks[i]]) + else: + chunks[i]='&' + chunks[i] + ';' + text = u"".join(chunks) + + try: + # see if the resulting text is a well-formed XML fragment + div = '
<div xmlns="http://www.w3.org/1999/xhtml">%s</div>
    ' + data = minidom.parseString((div % text.encode('utf-8'))) + + if text.find('<') < 0: + # plain text + node.removeAttribute('type') + text = data.documentElement.childNodes[0].nodeValue + node.childNodes[0].replaceWholeText(text) + + elif len(text) > 80: + # xhtml + node.setAttribute('type', 'xhtml') + node.removeChild(node.childNodes[0]) + node.appendChild(data.documentElement) + + except ExpatError: + # leave as html + pass + + else: + # recurse + retype(node) + + if parent.nodeName == 'entry': + relativize(parent) + +if __name__ == '__main__': + + # run styler on each file mention on the command line + import sys + for feed in sys.argv[1:]: + doc = minidom.parse(feed) + doc.normalize() + retype(doc.documentElement) + open(feed,'w').write(doc.toxml('utf-8')) diff --git a/DJAGEN/trunk/djagen/gezegen/planet/cache.py b/DJAGEN/trunk/djagen/gezegen/planet/cache.py new file mode 100755 index 0000000..dfc529b --- /dev/null +++ b/DJAGEN/trunk/djagen/gezegen/planet/cache.py @@ -0,0 +1,306 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 -*- +"""Item cache. + +Between runs of Planet we need somewhere to store the feed information +we parsed, this is so we don't lose information when a particular feed +goes away or is too short to hold enough items. + +This module provides the code to handle this cache transparently enough +that the rest of the code can take the persistance for granted. +""" + +import os +import re + + +# Regular expressions to sanitise cache filenames +re_url_scheme = re.compile(r'^[^:]*://') +re_slash = re.compile(r'[?/]+') +re_initial_cruft = re.compile(r'^[,.]*') +re_final_cruft = re.compile(r'[,.]*$') + + +class CachedInfo: + """Cached information. + + This class is designed to hold information that is stored in a cache + between instances. It can act both as a dictionary (c['foo']) and + as an object (c.foo) to get and set values and supports both string + and date values. + + If you wish to support special fields you can derive a class off this + and implement get_FIELD and set_FIELD functions which will be + automatically called. 
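The four regular expressions above are applied in order by filename() at the bottom of this module to turn a feed URL into a flat, filesystem-safe cache name. For example:

```python
import re

re_url_scheme = re.compile(r'^[^:]*://')
re_slash = re.compile(r'[?/]+')
re_initial_cruft = re.compile(r'^[,.]*')
re_final_cruft = re.compile(r'[,.]*$')

name = "http://example.org/feed/?type=rss"  # hypothetical feed URL
for rx, sub in ((re_url_scheme, ""), (re_slash, ","),
                (re_initial_cruft, ""), (re_final_cruft, "")):
    name = rx.sub(sub, name)
print(name)  # example.org,feed,type=rss
```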
+ """ + STRING = "string" + DATE = "date" + NULL = "null" + + def __init__(self, cache, id_, root=0): + self._type = {} + self._value = {} + self._cached = {} + + self._cache = cache + self._id = id_.replace(" ", "%20") + self._root = root + + def cache_key(self, key): + """Return the cache key name for the given key.""" + key = key.replace(" ", "_") + if self._root: + return key + else: + return self._id + " " + key + + def cache_read(self): + """Read information from the cache.""" + if self._root: + keys_key = " keys" + else: + keys_key = self._id + + if self._cache.has_key(keys_key): + keys = self._cache[keys_key].split(" ") + else: + return + + for key in keys: + cache_key = self.cache_key(key) + if not self._cached.has_key(key) or self._cached[key]: + # Key either hasn't been loaded, or is one for the cache + self._value[key] = self._cache[cache_key] + self._type[key] = self._cache[cache_key + " type"] + self._cached[key] = 1 + + def cache_write(self, sync=1): + """Write information to the cache.""" + self.cache_clear(sync=0) + + keys = [] + for key in self.keys(): + cache_key = self.cache_key(key) + if not self._cached[key]: + if self._cache.has_key(cache_key): + # Non-cached keys need to be cleared + del(self._cache[cache_key]) + del(self._cache[cache_key + " type"]) + continue + + keys.append(key) + self._cache[cache_key] = self._value[key] + self._cache[cache_key + " type"] = self._type[key] + + if self._root: + keys_key = " keys" + else: + keys_key = self._id + + self._cache[keys_key] = " ".join(keys) + if sync: + self._cache.sync() + + def cache_clear(self, sync=1): + """Remove information from the cache.""" + if self._root: + keys_key = " keys" + else: + keys_key = self._id + + if self._cache.has_key(keys_key): + keys = self._cache[keys_key].split(" ") + del(self._cache[keys_key]) + else: + return + + for key in keys: + cache_key = self.cache_key(key) + del(self._cache[cache_key]) + del(self._cache[cache_key + " type"]) + + if sync: + self._cache.sync() + + def has_key(self, key): + """Check whether the key exists.""" + key = key.replace(" ", "_") + return self._value.has_key(key) + + def key_type(self, key): + """Return the key type.""" + key = key.replace(" ", "_") + return self._type[key] + + def set(self, key, value, cached=1): + """Set the value of the given key. + + If a set_KEY function exists that is called otherwise the + string function is called and the date function if that fails + (it nearly always will). + """ + key = key.replace(" ", "_") + + try: + func = getattr(self, "set_" + key) + except AttributeError: + pass + else: + return func(key, value) + + if value == None: + return self.set_as_null(key, value) + else: + try: + return self.set_as_string(key, value) + except TypeError: + return self.set_as_date(key, value) + + def get(self, key): + """Return the value of the given key. + + If a get_KEY function exists that is called otherwise the + correctly typed function is called if that exists. + """ + key = key.replace(" ", "_") + + try: + func = getattr(self, "get_" + key) + except AttributeError: + pass + else: + return func(key) + + try: + func = getattr(self, "get_as_" + self._type[key]) + except AttributeError: + pass + else: + return func(key) + + return self._value[key] + + def set_as_string(self, key, value, cached=1): + """Set the key to the string value. + + The value is converted to UTF-8 if it is a Unicode string, otherwise + it's assumed to have failed decoding (feedparser tries pretty hard) + so has all non-ASCII characters stripped. 
+ """ + value = utf8(value) + + key = key.replace(" ", "_") + self._value[key] = value + self._type[key] = self.STRING + self._cached[key] = cached + + def get_as_string(self, key): + """Return the key as a string value.""" + key = key.replace(" ", "_") + if not self.has_key(key): + raise KeyError, key + + return self._value[key] + + def set_as_date(self, key, value, cached=1): + """Set the key to the date value. + + The date should be a 9-item tuple as returned by time.gmtime(). + """ + value = " ".join([ str(s) for s in value ]) + + key = key.replace(" ", "_") + self._value[key] = value + self._type[key] = self.DATE + self._cached[key] = cached + + def get_as_date(self, key): + """Return the key as a date value.""" + key = key.replace(" ", "_") + if not self.has_key(key): + raise KeyError, key + + value = self._value[key] + return tuple([ int(i) for i in value.split(" ") ]) + + def set_as_null(self, key, value, cached=1): + """Set the key to the null value. + + This only exists to make things less magic. + """ + key = key.replace(" ", "_") + self._value[key] = "" + self._type[key] = self.NULL + self._cached[key] = cached + + def get_as_null(self, key): + """Return the key as the null value.""" + key = key.replace(" ", "_") + if not self.has_key(key): + raise KeyError, key + + return None + + def del_key(self, key): + """Delete the given key.""" + key = key.replace(" ", "_") + if not self.has_key(key): + raise KeyError, key + + del(self._value[key]) + del(self._type[key]) + del(self._cached[key]) + + def keys(self): + """Return the list of cached keys.""" + return self._value.keys() + + def __iter__(self): + """Iterate the cached keys.""" + return iter(self._value.keys()) + + # Special methods + __contains__ = has_key + __setitem__ = set_as_string + __getitem__ = get + __delitem__ = del_key + __delattr__ = del_key + + def __setattr__(self, key, value): + if key.startswith("_"): + self.__dict__[key] = value + else: + self.set(key, value) + + def __getattr__(self, key): + if self.has_key(key): + return self.get(key) + else: + raise AttributeError, key + + +def filename(directory, filename): + """Return a filename suitable for the cache. + + Strips dangerous and common characters to create a filename we + can use to store the cache in. + """ + filename = re_url_scheme.sub("", filename) + filename = re_slash.sub(",", filename) + filename = re_initial_cruft.sub("", filename) + filename = re_final_cruft.sub("", filename) + + return os.path.join(directory, filename) + +def utf8(value): + """Return the value as a UTF-8 string.""" + if type(value) == type(u''): + return value.encode("utf-8") + else: + try: + return unicode(value, "utf-8").encode("utf-8") + except UnicodeError: + try: + return unicode(value, "iso-8859-1").encode("utf-8") + except UnicodeError: + return unicode(value, "ascii", "replace").encode("utf-8") diff --git a/DJAGEN/trunk/djagen/gezegen/planet/compat_logging/__init__.py b/DJAGEN/trunk/djagen/gezegen/planet/compat_logging/__init__.py new file mode 100755 index 0000000..3bd0c6d --- /dev/null +++ b/DJAGEN/trunk/djagen/gezegen/planet/compat_logging/__init__.py @@ -0,0 +1,1196 @@ +# Copyright 2001-2002 by Vinay Sajip. All Rights Reserved. 
+# +# Permission to use, copy, modify, and distribute this software and its +# documentation for any purpose and without fee is hereby granted, +# provided that the above copyright notice appear in all copies and that +# both that copyright notice and this permission notice appear in +# supporting documentation, and that the name of Vinay Sajip +# not be used in advertising or publicity pertaining to distribution +# of the software without specific, written prior permission. +# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL +# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER +# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +""" +Logging package for Python. Based on PEP 282 and comments thereto in +comp.lang.python, and influenced by Apache's log4j system. + +Should work under Python versions >= 1.5.2, except that source line +information is not available unless 'sys._getframe()' is. + +Copyright (C) 2001-2002 Vinay Sajip. All Rights Reserved. + +To use, simply 'import logging' and log away! +""" + +import sys, os, types, time, string, cStringIO + +try: + import thread + import threading +except ImportError: + thread = None + +__author__ = "Vinay Sajip " +__status__ = "beta" +__version__ = "0.4.8.1" +__date__ = "26 June 2003" + +#--------------------------------------------------------------------------- +# Miscellaneous module data +#--------------------------------------------------------------------------- + +# +#_srcfile is used when walking the stack to check when we've got the first +# caller stack frame. +# +if string.lower(__file__[-4:]) in ['.pyc', '.pyo']: + _srcfile = __file__[:-4] + '.py' +else: + _srcfile = __file__ +_srcfile = os.path.normcase(_srcfile) + +# _srcfile is only used in conjunction with sys._getframe(). +# To provide compatibility with older versions of Python, set _srcfile +# to None if _getframe() is not available; this value will prevent +# findCaller() from being called. +if not hasattr(sys, "_getframe"): + _srcfile = None + +# +#_startTime is used as the base when calculating the relative time of events +# +_startTime = time.time() + +# +#raiseExceptions is used to see if exceptions during handling should be +#propagated +# +raiseExceptions = 1 + +#--------------------------------------------------------------------------- +# Level related stuff +#--------------------------------------------------------------------------- +# +# Default levels and level names, these can be replaced with any positive set +# of values having corresponding names. There is a pseudo-level, NOTSET, which +# is only really there as a lower limit for user-defined levels. Handlers and +# loggers are initialized with NOTSET so that they will log all messages, even +# at user-defined levels. +# +CRITICAL = 50 +FATAL = CRITICAL +ERROR = 40 +WARNING = 30 +WARN = WARNING +INFO = 20 +DEBUG = 10 +NOTSET = 0 + +_levelNames = { + CRITICAL : 'CRITICAL', + ERROR : 'ERROR', + WARNING : 'WARNING', + INFO : 'INFO', + DEBUG : 'DEBUG', + NOTSET : 'NOTSET', + 'CRITICAL' : CRITICAL, + 'ERROR' : ERROR, + 'WARN' : WARNING, + 'WARNING' : WARNING, + 'INFO' : INFO, + 'DEBUG' : DEBUG, + 'NOTSET' : NOTSET, +} + +def getLevelName(level): + """ + Return the textual representation of logging level 'level'. 
+
+ If the level is one of the predefined levels (CRITICAL, ERROR, WARNING,
+ INFO, DEBUG) then you get the corresponding string. If you have
+ associated levels with names using addLevelName then the name you have
+ associated with 'level' is returned. Otherwise, the string
+ "Level %s" % level is returned.
+ """
+ return _levelNames.get(level, ("Level %s" % level))
+
+def addLevelName(level, levelName):
+ """
+ Associate 'levelName' with 'level'.
+
+ This is used when converting levels to text during message formatting.
+ """
+ _acquireLock()
+ try: #unlikely to cause an exception, but you never know...
+ _levelNames[level] = levelName
+ _levelNames[levelName] = level
+ finally:
+ _releaseLock()
+
+#---------------------------------------------------------------------------
+# Thread-related stuff
+#---------------------------------------------------------------------------
+
+#
+#_lock is used to serialize access to shared data structures in this module.
+#This needs to be an RLock because fileConfig() creates Handlers and so
+#might arbitrary user threads. Since Handler.__init__() updates the shared
+#dictionary _handlers, it needs to acquire the lock. But if configuring,
+#the lock would already have been acquired - so we need an RLock.
+#The same argument applies to Loggers and Manager.loggerDict.
+#
+_lock = None
+
+def _acquireLock():
+ """
+ Acquire the module-level lock for serializing access to shared data.
+
+ This should be released with _releaseLock().
+ """
+ global _lock
+ if (not _lock) and thread:
+ _lock = threading.RLock()
+ if _lock:
+ _lock.acquire()
+
+def _releaseLock():
+ """
+ Release the module-level lock acquired by calling _acquireLock().
+ """
+ if _lock:
+ _lock.release()
+
+#---------------------------------------------------------------------------
+# The logging record
+#---------------------------------------------------------------------------
+
+class LogRecord:
+ """
+ A LogRecord instance represents an event being logged.
+
+ LogRecord instances are created every time something is logged. They
+ contain all the information pertinent to the event being logged. The
+ main information passed in is in msg and args, which are combined
+ using str(msg) % args to create the message field of the record. The
+ record also includes information such as when the record was created,
+ the source line where the logging call was made, and any exception
+ information to be logged.
+ """
+ def __init__(self, name, level, pathname, lineno, msg, args, exc_info):
+ """
+ Initialize a logging record with interesting information.
+ """
+ ct = time.time()
+ self.name = name
+ self.msg = msg
+ self.args = args
+ self.levelname = getLevelName(level)
+ self.levelno = level
+ self.pathname = pathname
+ try:
+ self.filename = os.path.basename(pathname)
+ self.module = os.path.splitext(self.filename)[0]
+ except:
+ self.filename = pathname
+ self.module = "Unknown module"
+ self.exc_info = exc_info
+ self.lineno = lineno
+ self.created = ct
+ self.msecs = (ct - long(ct)) * 1000
+ self.relativeCreated = (self.created - _startTime) * 1000
+ if thread:
+ self.thread = thread.get_ident()
+ else:
+ self.thread = None
+ if hasattr(os, 'getpid'):
+ self.process = os.getpid()
+ else:
+ self.process = None
+
+ def __str__(self):
+ return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno,
+ self.pathname, self.lineno, self.msg)
+
+ def getMessage(self):
+ """
+ Return the message for this LogRecord.
+
+ Return the message for this LogRecord after merging any user-supplied
+ arguments with the message.
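The point of storing msg and args separately is that the '%' merge is deferred until getMessage() runs, so a record that is filtered out never pays for string formatting. The contract in miniature (message and arguments are made up):

```python
msg, args = "Updating feed <%s> (%d new entries)", ("http://example.org/feed", 3)

# What getMessage() effectively does once the record is actually emitted:
message = msg % args if args else msg
print(message)  # Updating feed <http://example.org/feed> (3 new entries)
```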
+ """ + if not hasattr(types, "UnicodeType"): #if no unicode support... + msg = str(self.msg) + else: + try: + msg = str(self.msg) + except UnicodeError: + msg = self.msg #Defer encoding till later + if self.args: + msg = msg % self.args + return msg + +def makeLogRecord(dict): + """ + Make a LogRecord whose attributes are defined by the specified dictionary, + This function is useful for converting a logging event received over + a socket connection (which is sent as a dictionary) into a LogRecord + instance. + """ + rv = LogRecord(None, None, "", 0, "", (), None) + rv.__dict__.update(dict) + return rv + +#--------------------------------------------------------------------------- +# Formatter classes and functions +#--------------------------------------------------------------------------- + +class Formatter: + """ + Formatter instances are used to convert a LogRecord to text. + + Formatters need to know how a LogRecord is constructed. They are + responsible for converting a LogRecord to (usually) a string which can + be interpreted by either a human or an external system. The base Formatter + allows a formatting string to be specified. If none is supplied, the + default value of "%s(message)\\n" is used. + + The Formatter can be initialized with a format string which makes use of + knowledge of the LogRecord attributes - e.g. the default value mentioned + above makes use of the fact that the user's message and arguments are pre- + formatted into a LogRecord's message attribute. Currently, the useful + attributes in a LogRecord are described by: + + %(name)s Name of the logger (logging channel) + %(levelno)s Numeric logging level for the message (DEBUG, INFO, + WARNING, ERROR, CRITICAL) + %(levelname)s Text logging level for the message ("DEBUG", "INFO", + "WARNING", "ERROR", "CRITICAL") + %(pathname)s Full pathname of the source file where the logging + call was issued (if available) + %(filename)s Filename portion of pathname + %(module)s Module (name portion of filename) + %(lineno)d Source line number where the logging call was issued + (if available) + %(created)f Time when the LogRecord was created (time.time() + return value) + %(asctime)s Textual time when the LogRecord was created + %(msecs)d Millisecond portion of the creation time + %(relativeCreated)d Time in milliseconds when the LogRecord was created, + relative to the time the logging module was loaded + (typically at application startup time) + %(thread)d Thread ID (if available) + %(process)d Process ID (if available) + %(message)s The result of record.getMessage(), computed just as + the record is emitted + """ + + converter = time.localtime + + def __init__(self, fmt=None, datefmt=None): + """ + Initialize the formatter with specified format strings. + + Initialize the formatter either with the specified format string, or a + default as described above. Allow for specialized date formatting with + the optional datefmt argument (if omitted, you get the ISO8601 format). + """ + if fmt: + self._fmt = fmt + else: + self._fmt = "%(message)s" + self.datefmt = datefmt + + def formatTime(self, record, datefmt=None): + """ + Return the creation time of the specified LogRecord as formatted text. + + This method should be called from format() by a formatter which + wants to make use of a formatted time. 
This method can be overridden + in formatters to provide for any specific requirement, but the + basic behaviour is as follows: if datefmt (a string) is specified, + it is used with time.strftime() to format the creation time of the + record. Otherwise, the ISO8601 format is used. The resulting + string is returned. This function uses a user-configurable function + to convert the creation time to a tuple. By default, time.localtime() + is used; to change this for a particular formatter instance, set the + 'converter' attribute to a function with the same signature as + time.localtime() or time.gmtime(). To change it for all formatters, + for example if you want all logging times to be shown in GMT, + set the 'converter' attribute in the Formatter class. + """ + ct = self.converter(record.created) + if datefmt: + s = time.strftime(datefmt, ct) + else: + t = time.strftime("%Y-%m-%d %H:%M:%S", ct) + s = "%s,%03d" % (t, record.msecs) + return s + + def formatException(self, ei): + """ + Format and return the specified exception information as a string. + + This default implementation just uses + traceback.print_exception() + """ + import traceback + sio = cStringIO.StringIO() + traceback.print_exception(ei[0], ei[1], ei[2], None, sio) + s = sio.getvalue() + sio.close() + if s[-1] == "\n": + s = s[:-1] + return s + + def format(self, record): + """ + Format the specified record as text. + + The record's attribute dictionary is used as the operand to a + string formatting operation which yields the returned string. + Before formatting the dictionary, a couple of preparatory steps + are carried out. The message attribute of the record is computed + using LogRecord.getMessage(). If the formatting string contains + "%(asctime)", formatTime() is called to format the event time. + If there is exception information, it is formatted using + formatException() and appended to the message. + """ + record.message = record.getMessage() + if string.find(self._fmt,"%(asctime)") >= 0: + record.asctime = self.formatTime(record, self.datefmt) + s = self._fmt % record.__dict__ + if record.exc_info: + if s[-1] != "\n": + s = s + "\n" + s = s + self.formatException(record.exc_info) + return s + +# +# The default formatter to use when no other is specified +# +_defaultFormatter = Formatter() + +class BufferingFormatter: + """ + A formatter suitable for formatting a number of records. + """ + def __init__(self, linefmt=None): + """ + Optionally specify a formatter which will be used to format each + individual record. + """ + if linefmt: + self.linefmt = linefmt + else: + self.linefmt = _defaultFormatter + + def formatHeader(self, records): + """ + Return the header string for the specified records. + """ + return "" + + def formatFooter(self, records): + """ + Return the footer string for the specified records. + """ + return "" + + def format(self, records): + """ + Format the specified records and return the result as a string. + """ + rv = "" + if len(records) > 0: + rv = rv + self.formatHeader(records) + for record in records: + rv = rv + self.linefmt.format(record) + rv = rv + self.formatFooter(records) + return rv + +#--------------------------------------------------------------------------- +# Filter classes and functions +#--------------------------------------------------------------------------- + +class Filter: + """ + Filter instances are used to perform arbitrary filtering of LogRecords. + + Loggers and Handlers can optionally use Filter instances to filter + records as desired. 
The base filter class only allows events which are + below a certain point in the logger hierarchy. For example, a filter + initialized with "A.B" will allow events logged by loggers "A.B", + "A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If + initialized with the empty string, all events are passed. + """ + def __init__(self, name=''): + """ + Initialize a filter. + + Initialize with the name of the logger which, together with its + children, will have its events allowed through the filter. If no + name is specified, allow every event. + """ + self.name = name + self.nlen = len(name) + + def filter(self, record): + """ + Determine if the specified record is to be logged. + + Is the specified record to be logged? Returns 0 for no, nonzero for + yes. If deemed appropriate, the record may be modified in-place. + """ + if self.nlen == 0: + return 1 + elif self.name == record.name: + return 1 + elif string.find(record.name, self.name, 0, self.nlen) != 0: + return 0 + return (record.name[self.nlen] == ".") + +class Filterer: + """ + A base class for loggers and handlers which allows them to share + common code. + """ + def __init__(self): + """ + Initialize the list of filters to be an empty list. + """ + self.filters = [] + + def addFilter(self, filter): + """ + Add the specified filter to this handler. + """ + if not (filter in self.filters): + self.filters.append(filter) + + def removeFilter(self, filter): + """ + Remove the specified filter from this handler. + """ + if filter in self.filters: + self.filters.remove(filter) + + def filter(self, record): + """ + Determine if a record is loggable by consulting all the filters. + + The default is to allow the record to be logged; any filter can veto + this and the record is then dropped. Returns a zero value if a record + is to be dropped, else non-zero. + """ + rv = 1 + for f in self.filters: + if not f.filter(record): + rv = 0 + break + return rv + +#--------------------------------------------------------------------------- +# Handler classes and functions +#--------------------------------------------------------------------------- + +_handlers = {} #repository of handlers (for flushing when shutdown called) + +class Handler(Filterer): + """ + Handler instances dispatch logging events to specific destinations. + + The base handler class. Acts as a placeholder which defines the Handler + interface. Handlers can optionally use Formatter instances to format + records as desired. By default, no formatter is specified; in this case, + the 'raw' message as determined by record.message is logged. + """ + def __init__(self, level=NOTSET): + """ + Initializes the instance - basically setting the formatter to None + and the filter list to empty. + """ + Filterer.__init__(self) + self.level = level + self.formatter = None + #get the module data lock, as we're updating a shared structure. + _acquireLock() + try: #unlikely to raise an exception, but you never know... + _handlers[self] = 1 + finally: + _releaseLock() + self.createLock() + + def createLock(self): + """ + Acquire a thread lock for serializing access to the underlying I/O. + """ + if thread: + self.lock = thread.allocate_lock() + else: + self.lock = None + + def acquire(self): + """ + Acquire the I/O thread lock. + """ + if self.lock: + self.lock.acquire() + + def release(self): + """ + Release the I/O thread lock. + """ + if self.lock: + self.lock.release() + + def setLevel(self, level): + """ + Set the logging level of this handler. 
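The hierarchy test in Filter.filter() above reduces to: an empty filter passes everything, otherwise the configured name must be a whole dot-separated prefix of the record's logger name. As a standalone sketch (allows is a stand-in name):

```python
def allows(filter_name, record_name):
    # Same rule as Filter.filter() above, without the LogRecord plumbing.
    if not filter_name:
        return True
    if filter_name == record_name:
        return True
    if not record_name.startswith(filter_name):
        return False
    return record_name[len(filter_name)] == "."

print(allows("A.B", "A.B.C"))  # True
print(allows("A.B", "A.BB"))   # False
```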
+ """ + self.level = level + + def format(self, record): + """ + Format the specified record. + + If a formatter is set, use it. Otherwise, use the default formatter + for the module. + """ + if self.formatter: + fmt = self.formatter + else: + fmt = _defaultFormatter + return fmt.format(record) + + def emit(self, record): + """ + Do whatever it takes to actually log the specified logging record. + + This version is intended to be implemented by subclasses and so + raises a NotImplementedError. + """ + raise NotImplementedError, 'emit must be implemented '\ + 'by Handler subclasses' + + def handle(self, record): + """ + Conditionally emit the specified logging record. + + Emission depends on filters which may have been added to the handler. + Wrap the actual emission of the record with acquisition/release of + the I/O thread lock. Returns whether the filter passed the record for + emission. + """ + rv = self.filter(record) + if rv: + self.acquire() + try: + self.emit(record) + finally: + self.release() + return rv + + def setFormatter(self, fmt): + """ + Set the formatter for this handler. + """ + self.formatter = fmt + + def flush(self): + """ + Ensure all logging output has been flushed. + + This version does nothing and is intended to be implemented by + subclasses. + """ + pass + + def close(self): + """ + Tidy up any resources used by the handler. + + This version does nothing and is intended to be implemented by + subclasses. + """ + pass + + def handleError(self, record): + """ + Handle errors which occur during an emit() call. + + This method should be called from handlers when an exception is + encountered during an emit() call. If raiseExceptions is false, + exceptions get silently ignored. This is what is mostly wanted + for a logging system - most users will not care about errors in + the logging system, they are more interested in application errors. + You could, however, replace this with a custom handler if you wish. + The record which was being processed is passed in to this method. + """ + if raiseExceptions: + import traceback + ei = sys.exc_info() + traceback.print_exception(ei[0], ei[1], ei[2], None, sys.stderr) + del ei + +class StreamHandler(Handler): + """ + A handler class which writes logging records, appropriately formatted, + to a stream. Note that this class does not close the stream, as + sys.stdout or sys.stderr may be used. + """ + def __init__(self, strm=None): + """ + Initialize the handler. + + If strm is not specified, sys.stderr is used. + """ + Handler.__init__(self) + if not strm: + strm = sys.stderr + self.stream = strm + self.formatter = None + + def flush(self): + """ + Flushes the stream. + """ + self.stream.flush() + + def emit(self, record): + """ + Emit a record. + + If a formatter is specified, it is used to format the record. + The record is then written to the stream with a trailing newline + [N.B. this may be removed depending on feedback]. If exception + information is present, it is formatted using + traceback.print_exception and appended to the stream. + """ + try: + msg = self.format(record) + if not hasattr(types, "UnicodeType"): #if no unicode support... + self.stream.write("%s\n" % msg) + else: + try: + self.stream.write("%s\n" % msg) + except UnicodeError: + self.stream.write("%s\n" % msg.encode("UTF-8")) + self.flush() + except: + self.handleError(record) + +class FileHandler(StreamHandler): + """ + A handler class which writes formatted logging records to disk files. 
+ """ + def __init__(self, filename, mode="a"): + """ + Open the specified file and use it as the stream for logging. + """ + StreamHandler.__init__(self, open(filename, mode)) + self.baseFilename = filename + self.mode = mode + + def close(self): + """ + Closes the stream. + """ + self.stream.close() + +#--------------------------------------------------------------------------- +# Manager classes and functions +#--------------------------------------------------------------------------- + +class PlaceHolder: + """ + PlaceHolder instances are used in the Manager logger hierarchy to take + the place of nodes for which no loggers have been defined [FIXME add + example]. + """ + def __init__(self, alogger): + """ + Initialize with the specified logger being a child of this placeholder. + """ + self.loggers = [alogger] + + def append(self, alogger): + """ + Add the specified logger as a child of this placeholder. + """ + if alogger not in self.loggers: + self.loggers.append(alogger) + +# +# Determine which class to use when instantiating loggers. +# +_loggerClass = None + +def setLoggerClass(klass): + """ + Set the class to be used when instantiating a logger. The class should + define __init__() such that only a name argument is required, and the + __init__() should call Logger.__init__() + """ + if klass != Logger: + if not issubclass(klass, Logger): + raise TypeError, "logger not derived from logging.Logger: " + \ + klass.__name__ + global _loggerClass + _loggerClass = klass + +class Manager: + """ + There is [under normal circumstances] just one Manager instance, which + holds the hierarchy of loggers. + """ + def __init__(self, rootnode): + """ + Initialize the manager with the root node of the logger hierarchy. + """ + self.root = rootnode + self.disable = 0 + self.emittedNoHandlerWarning = 0 + self.loggerDict = {} + + def getLogger(self, name): + """ + Get a logger with the specified name (channel name), creating it + if it doesn't yet exist. + + If a PlaceHolder existed for the specified name [i.e. the logger + didn't exist but a child of it did], replace it with the created + logger and fix up the parent/child references which pointed to the + placeholder to now point to the logger. + """ + rv = None + _acquireLock() + try: + if self.loggerDict.has_key(name): + rv = self.loggerDict[name] + if isinstance(rv, PlaceHolder): + ph = rv + rv = _loggerClass(name) + rv.manager = self + self.loggerDict[name] = rv + self._fixupChildren(ph, rv) + self._fixupParents(rv) + else: + rv = _loggerClass(name) + rv.manager = self + self.loggerDict[name] = rv + self._fixupParents(rv) + finally: + _releaseLock() + return rv + + def _fixupParents(self, alogger): + """ + Ensure that there are either loggers or placeholders all the way + from the specified logger to the root of the logger hierarchy. + """ + name = alogger.name + i = string.rfind(name, ".") + rv = None + while (i > 0) and not rv: + substr = name[:i] + if not self.loggerDict.has_key(substr): + self.loggerDict[substr] = PlaceHolder(alogger) + else: + obj = self.loggerDict[substr] + if isinstance(obj, Logger): + rv = obj + else: + assert isinstance(obj, PlaceHolder) + obj.append(alogger) + i = string.rfind(name, ".", 0, i - 1) + if not rv: + rv = self.root + alogger.parent = rv + + def _fixupChildren(self, ph, alogger): + """ + Ensure that children of the placeholder ph are connected to the + specified logger. 
+ """ + for c in ph.loggers: + if string.find(c.parent.name, alogger.name) <> 0: + alogger.parent = c.parent + c.parent = alogger + +#--------------------------------------------------------------------------- +# Logger classes and functions +#--------------------------------------------------------------------------- + +class Logger(Filterer): + """ + Instances of the Logger class represent a single logging channel. A + "logging channel" indicates an area of an application. Exactly how an + "area" is defined is up to the application developer. Since an + application can have any number of areas, logging channels are identified + by a unique string. Application areas can be nested (e.g. an area + of "input processing" might include sub-areas "read CSV files", "read + XLS files" and "read Gnumeric files"). To cater for this natural nesting, + channel names are organized into a namespace hierarchy where levels are + separated by periods, much like the Java or Python package namespace. So + in the instance given above, channel names might be "input" for the upper + level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels. + There is no arbitrary limit to the depth of nesting. + """ + def __init__(self, name, level=NOTSET): + """ + Initialize the logger with a name and an optional level. + """ + Filterer.__init__(self) + self.name = name + self.level = level + self.parent = None + self.propagate = 1 + self.handlers = [] + self.disabled = 0 + + def setLevel(self, level): + """ + Set the logging level of this logger. + """ + self.level = level + +# def getRoot(self): +# """ +# Get the root of the logger hierarchy. +# """ +# return Logger.root + + def debug(self, msg, *args, **kwargs): + """ + Log 'msg % args' with severity 'DEBUG'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.debug("Houston, we have a %s", "thorny problem", exc_info=1) + """ + if self.manager.disable >= DEBUG: + return + if DEBUG >= self.getEffectiveLevel(): + apply(self._log, (DEBUG, msg, args), kwargs) + + def info(self, msg, *args, **kwargs): + """ + Log 'msg % args' with severity 'INFO'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.info("Houston, we have a %s", "interesting problem", exc_info=1) + """ + if self.manager.disable >= INFO: + return + if INFO >= self.getEffectiveLevel(): + apply(self._log, (INFO, msg, args), kwargs) + + def warning(self, msg, *args, **kwargs): + """ + Log 'msg % args' with severity 'WARNING'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1) + """ + if self.manager.disable >= WARNING: + return + if self.isEnabledFor(WARNING): + apply(self._log, (WARNING, msg, args), kwargs) + + warn = warning + + def error(self, msg, *args, **kwargs): + """ + Log 'msg % args' with severity 'ERROR'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.error("Houston, we have a %s", "major problem", exc_info=1) + """ + if self.manager.disable >= ERROR: + return + if self.isEnabledFor(ERROR): + apply(self._log, (ERROR, msg, args), kwargs) + + def exception(self, msg, *args): + """ + Convenience method for logging an ERROR with exception information. + """ + apply(self.error, (msg,) + args, {'exc_info': 1}) + + def critical(self, msg, *args, **kwargs): + """ + Log 'msg % args' with severity 'CRITICAL'. 
+ + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.critical("Houston, we have a %s", "major disaster", exc_info=1) + """ + if self.manager.disable >= CRITICAL: + return + if CRITICAL >= self.getEffectiveLevel(): + apply(self._log, (CRITICAL, msg, args), kwargs) + + fatal = critical + + def log(self, level, msg, *args, **kwargs): + """ + Log 'msg % args' with the severity 'level'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.log(level, "We have a %s", "mysterious problem", exc_info=1) + """ + if self.manager.disable >= level: + return + if self.isEnabledFor(level): + apply(self._log, (level, msg, args), kwargs) + + def findCaller(self): + """ + Find the stack frame of the caller so that we can note the source + file name and line number. + """ + f = sys._getframe(1) + while 1: + co = f.f_code + filename = os.path.normcase(co.co_filename) + if filename == _srcfile: + f = f.f_back + continue + return filename, f.f_lineno + + def makeRecord(self, name, level, fn, lno, msg, args, exc_info): + """ + A factory method which can be overridden in subclasses to create + specialized LogRecords. + """ + return LogRecord(name, level, fn, lno, msg, args, exc_info) + + def _log(self, level, msg, args, exc_info=None): + """ + Low-level logging routine which creates a LogRecord and then calls + all the handlers of this logger to handle the record. + """ + if _srcfile: + fn, lno = self.findCaller() + else: + fn, lno = "", 0 + if exc_info: + exc_info = sys.exc_info() + record = self.makeRecord(self.name, level, fn, lno, msg, args, exc_info) + self.handle(record) + + def handle(self, record): + """ + Call the handlers for the specified record. + + This method is used for unpickled records received from a socket, as + well as those created locally. Logger-level filtering is applied. + """ + if (not self.disabled) and self.filter(record): + self.callHandlers(record) + + def addHandler(self, hdlr): + """ + Add the specified handler to this logger. + """ + if not (hdlr in self.handlers): + self.handlers.append(hdlr) + + def removeHandler(self, hdlr): + """ + Remove the specified handler from this logger. + """ + if hdlr in self.handlers: + #hdlr.close() + self.handlers.remove(hdlr) + + def callHandlers(self, record): + """ + Pass a record to all relevant handlers. + + Loop through all handlers for this logger and its parents in the + logger hierarchy. If no handler was found, output a one-off error + message to sys.stderr. Stop searching up the hierarchy whenever a + logger with the "propagate" attribute set to zero is found - that + will be the last logger whose handlers are called. + """ + c = self + found = 0 + while c: + for hdlr in c.handlers: + found = found + 1 + if record.levelno >= hdlr.level: + hdlr.handle(record) + if not c.propagate: + c = None #break out + else: + c = c.parent + if (found == 0) and not self.manager.emittedNoHandlerWarning: + sys.stderr.write("No handlers could be found for logger" + " \"%s\"\n" % self.name) + self.manager.emittedNoHandlerWarning = 1 + + def getEffectiveLevel(self): + """ + Get the effective level for this logger. + + Loop through this logger and its parents in the logger hierarchy, + looking for a non-zero logging level. Return the first one found. + """ + logger = self + while logger: + if logger.level: + return logger.level + logger = logger.parent + return NOTSET + + def isEnabledFor(self, level): + """ + Is this logger enabled for level 'level'? 
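callHandlers() and getEffectiveLevel() together define the hierarchy's behaviour: a record accepted by a child is offered to the handlers of every ancestor until a logger with propagate set to zero is hit, and a logger left at NOTSET defers its threshold to the nearest ancestor that set one. A sketch:

    import logging

    parent = logging.getLogger('input')
    parent.setLevel(logging.WARNING)
    parent.addHandler(logging.StreamHandler())

    child = logging.getLogger('input.csv')   # level stays NOTSET
    assert child.getEffectiveLevel() == logging.WARNING

    child.warning("emitted through the parent's handler")   # propagates up
    child.info("dropped: below the inherited WARNING threshold")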
+ """ + if self.manager.disable >= level: + return 0 + return level >= self.getEffectiveLevel() + +class RootLogger(Logger): + """ + A root logger is not that different to any other logger, except that + it must have a logging level and there is only one instance of it in + the hierarchy. + """ + def __init__(self, level): + """ + Initialize the logger with the name "root". + """ + Logger.__init__(self, "root", level) + +_loggerClass = Logger + +root = RootLogger(WARNING) +Logger.root = root +Logger.manager = Manager(Logger.root) + +#--------------------------------------------------------------------------- +# Configuration classes and functions +#--------------------------------------------------------------------------- + +BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s" + +def basicConfig(): + """ + Do basic configuration for the logging system by creating a + StreamHandler with a default Formatter and adding it to the + root logger. + """ + if len(root.handlers) == 0: + hdlr = StreamHandler() + fmt = Formatter(BASIC_FORMAT) + hdlr.setFormatter(fmt) + root.addHandler(hdlr) + +#--------------------------------------------------------------------------- +# Utility functions at module level. +# Basically delegate everything to the root logger. +#--------------------------------------------------------------------------- + +def getLogger(name=None): + """ + Return a logger with the specified name, creating it if necessary. + + If no name is specified, return the root logger. + """ + if name: + return Logger.manager.getLogger(name) + else: + return root + +#def getRootLogger(): +# """ +# Return the root logger. +# +# Note that getLogger('') now does the same thing, so this function is +# deprecated and may disappear in the future. +# """ +# return root + +def critical(msg, *args, **kwargs): + """ + Log a message with severity 'CRITICAL' on the root logger. + """ + if len(root.handlers) == 0: + basicConfig() + apply(root.critical, (msg,)+args, kwargs) + +fatal = critical + +def error(msg, *args, **kwargs): + """ + Log a message with severity 'ERROR' on the root logger. + """ + if len(root.handlers) == 0: + basicConfig() + apply(root.error, (msg,)+args, kwargs) + +def exception(msg, *args): + """ + Log a message with severity 'ERROR' on the root logger, + with exception information. + """ + apply(error, (msg,)+args, {'exc_info': 1}) + +def warning(msg, *args, **kwargs): + """ + Log a message with severity 'WARNING' on the root logger. + """ + if len(root.handlers) == 0: + basicConfig() + apply(root.warning, (msg,)+args, kwargs) + +warn = warning + +def info(msg, *args, **kwargs): + """ + Log a message with severity 'INFO' on the root logger. + """ + if len(root.handlers) == 0: + basicConfig() + apply(root.info, (msg,)+args, kwargs) + +def debug(msg, *args, **kwargs): + """ + Log a message with severity 'DEBUG' on the root logger. + """ + if len(root.handlers) == 0: + basicConfig() + apply(root.debug, (msg,)+args, kwargs) + +def disable(level): + """ + Disable all logging calls less severe than 'level'. + """ + root.manager.disable = level + +def shutdown(): + """ + Perform any cleanup actions in the logging system (e.g. flushing + buffers). + + Should be called at application exit. 
+ """ + for h in _handlers.keys(): + h.flush() + h.close() diff --git a/DJAGEN/trunk/djagen/gezegen/planet/compat_logging/config.py b/DJAGEN/trunk/djagen/gezegen/planet/compat_logging/config.py new file mode 100755 index 0000000..d4d08f0 --- /dev/null +++ b/DJAGEN/trunk/djagen/gezegen/planet/compat_logging/config.py @@ -0,0 +1,299 @@ +# Copyright 2001-2002 by Vinay Sajip. All Rights Reserved. +# +# Permission to use, copy, modify, and distribute this software and its +# documentation for any purpose and without fee is hereby granted, +# provided that the above copyright notice appear in all copies and that +# both that copyright notice and this permission notice appear in +# supporting documentation, and that the name of Vinay Sajip +# not be used in advertising or publicity pertaining to distribution +# of the software without specific, written prior permission. +# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL +# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER +# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +""" +Logging package for Python. Based on PEP 282 and comments thereto in +comp.lang.python, and influenced by Apache's log4j system. + +Should work under Python versions >= 1.5.2, except that source line +information is not available unless 'inspect' is. + +Copyright (C) 2001-2002 Vinay Sajip. All Rights Reserved. + +To use, simply 'import logging' and log away! +""" + +import sys, logging, logging.handlers, string, thread, threading, socket, struct, os + +from SocketServer import ThreadingTCPServer, StreamRequestHandler + + +DEFAULT_LOGGING_CONFIG_PORT = 9030 +if sys.platform == "win32": + RESET_ERROR = 10054 #WSAECONNRESET +else: + RESET_ERROR = 104 #ECONNRESET + +# +# The following code implements a socket listener for on-the-fly +# reconfiguration of logging. +# +# _listener holds the server object doing the listening +_listener = None + +def fileConfig(fname, defaults=None): + """ + Read the logging configuration from a ConfigParser-format file. + + This can be called several times from an application, allowing an end user + the ability to select from various pre-canned configurations (if the + developer provides a mechanism to present the choices and load the chosen + configuration). + In versions of ConfigParser which have the readfp method [typically + shipped in 2.x versions of Python], you can pass in a file-like object + rather than a filename, in which case the file-like object will be read + using readfp. + """ + import ConfigParser + + cp = ConfigParser.ConfigParser(defaults) + if hasattr(cp, 'readfp') and hasattr(fname, 'readline'): + cp.readfp(fname) + else: + cp.read(fname) + #first, do the formatters... + flist = cp.get("formatters", "keys") + if len(flist): + flist = string.split(flist, ",") + formatters = {} + for form in flist: + sectname = "formatter_%s" % form + opts = cp.options(sectname) + if "format" in opts: + fs = cp.get(sectname, "format", 1) + else: + fs = None + if "datefmt" in opts: + dfs = cp.get(sectname, "datefmt", 1) + else: + dfs = None + f = logging.Formatter(fs, dfs) + formatters[form] = f + #next, do the handlers... + #critical section... + logging._acquireLock() + try: + try: + #first, lose the existing handlers... 
+ logging._handlers.clear() + #now set up the new ones... + hlist = cp.get("handlers", "keys") + if len(hlist): + hlist = string.split(hlist, ",") + handlers = {} + fixups = [] #for inter-handler references + for hand in hlist: + sectname = "handler_%s" % hand + klass = cp.get(sectname, "class") + opts = cp.options(sectname) + if "formatter" in opts: + fmt = cp.get(sectname, "formatter") + else: + fmt = "" + klass = eval(klass, vars(logging)) + args = cp.get(sectname, "args") + args = eval(args, vars(logging)) + h = apply(klass, args) + if "level" in opts: + level = cp.get(sectname, "level") + h.setLevel(logging._levelNames[level]) + if len(fmt): + h.setFormatter(formatters[fmt]) + #temporary hack for FileHandler and MemoryHandler. + if klass == logging.handlers.MemoryHandler: + if "target" in opts: + target = cp.get(sectname,"target") + else: + target = "" + if len(target): #the target handler may not be loaded yet, so keep for later... + fixups.append((h, target)) + handlers[hand] = h + #now all handlers are loaded, fixup inter-handler references... + for fixup in fixups: + h = fixup[0] + t = fixup[1] + h.setTarget(handlers[t]) + #at last, the loggers...first the root... + llist = cp.get("loggers", "keys") + llist = string.split(llist, ",") + llist.remove("root") + sectname = "logger_root" + root = logging.root + log = root + opts = cp.options(sectname) + if "level" in opts: + level = cp.get(sectname, "level") + log.setLevel(logging._levelNames[level]) + for h in root.handlers[:]: + root.removeHandler(h) + hlist = cp.get(sectname, "handlers") + if len(hlist): + hlist = string.split(hlist, ",") + for hand in hlist: + log.addHandler(handlers[hand]) + #and now the others... + #we don't want to lose the existing loggers, + #since other threads may have pointers to them. + #existing is set to contain all existing loggers, + #and as we go through the new configuration we + #remove any which are configured. At the end, + #what's left in existing is the set of loggers + #which were in the previous configuration but + #which are not in the new configuration. + existing = root.manager.loggerDict.keys() + #now set up the new ones... + for log in llist: + sectname = "logger_%s" % log + qn = cp.get(sectname, "qualname") + opts = cp.options(sectname) + if "propagate" in opts: + propagate = cp.getint(sectname, "propagate") + else: + propagate = 1 + logger = logging.getLogger(qn) + if qn in existing: + existing.remove(qn) + if "level" in opts: + level = cp.get(sectname, "level") + logger.setLevel(logging._levelNames[level]) + for h in logger.handlers[:]: + logger.removeHandler(h) + logger.propagate = propagate + logger.disabled = 0 + hlist = cp.get(sectname, "handlers") + if len(hlist): + hlist = string.split(hlist, ",") + for hand in hlist: + logger.addHandler(handlers[hand]) + #Disable any old loggers. There's no point deleting + #them as other threads may continue to hold references + #and by disabling them, you stop them doing any logging. + for log in existing: + root.manager.loggerDict[log].disabled = 1 + except: + import traceback + ei = sys.exc_info() + traceback.print_exception(ei[0], ei[1], ei[2], None, sys.stderr) + del ei + finally: + logging._releaseLock() + +def listen(port=DEFAULT_LOGGING_CONFIG_PORT): + """ + Start up a socket server on the specified port, and listen for new + configurations. + + These will be sent as a file suitable for processing by fileConfig(). + Returns a Thread object on which you can call start() to start the server, + and which you can join() when appropriate. 
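fileConfig() expects the ConfigParser layout implied above: [loggers], [handlers] and [formatters] each list keys, every key gets its own logger_*/handler_*/formatter_* section, the loggers list must include root, and each handler's args string is eval'd in the logging namespace (which is why sys is visible in it). A minimal sketch that this parser accepts; the file name logging.ini is illustrative:

    import logging.config

    conf = """\
    [loggers]
    keys=root

    [handlers]
    keys=console

    [formatters]
    keys=plain

    [formatter_plain]
    format=%(levelname)s %(name)s %(message)s

    [handler_console]
    class=StreamHandler
    level=DEBUG
    formatter=plain
    args=(sys.stderr,)

    [logger_root]
    level=DEBUG
    handlers=console
    """

    open('logging.ini', 'w').write(conf)
    logging.config.fileConfig('logging.ini')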
To stop the server, call + stopListening(). + """ + if not thread: + raise NotImplementedError, "listen() needs threading to work" + + class ConfigStreamHandler(StreamRequestHandler): + """ + Handler for a logging configuration request. + + It expects a completely new logging configuration and uses fileConfig + to install it. + """ + def handle(self): + """ + Handle a request. + + Each request is expected to be a 4-byte length, + followed by the config file. Uses fileConfig() to do the + grunt work. + """ + import tempfile + try: + conn = self.connection + chunk = conn.recv(4) + if len(chunk) == 4: + slen = struct.unpack(">L", chunk)[0] + chunk = self.connection.recv(slen) + while len(chunk) < slen: + chunk = chunk + conn.recv(slen - len(chunk)) + #Apply new configuration. We'd like to be able to + #create a StringIO and pass that in, but unfortunately + #1.5.2 ConfigParser does not support reading file + #objects, only actual files. So we create a temporary + #file and remove it later. + file = tempfile.mktemp(".ini") + f = open(file, "w") + f.write(chunk) + f.close() + fileConfig(file) + os.remove(file) + except socket.error, e: + if type(e.args) != types.TupleType: + raise + else: + errcode = e.args[0] + if errcode != RESET_ERROR: + raise + + class ConfigSocketReceiver(ThreadingTCPServer): + """ + A simple TCP socket-based logging config receiver. + """ + + allow_reuse_address = 1 + + def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT, + handler=None): + ThreadingTCPServer.__init__(self, (host, port), handler) + logging._acquireLock() + self.abort = 0 + logging._releaseLock() + self.timeout = 1 + + def serve_until_stopped(self): + import select + abort = 0 + while not abort: + rd, wr, ex = select.select([self.socket.fileno()], + [], [], + self.timeout) + if rd: + self.handle_request() + logging._acquireLock() + abort = self.abort + logging._releaseLock() + + def serve(rcvr, hdlr, port): + server = rcvr(port=port, handler=hdlr) + global _listener + logging._acquireLock() + _listener = server + logging._releaseLock() + server.serve_until_stopped() + + return threading.Thread(target=serve, + args=(ConfigSocketReceiver, + ConfigStreamHandler, port)) + +def stopListening(): + """ + Stop the listening server which was created with a call to listen(). + """ + global _listener + if _listener: + logging._acquireLock() + _listener.abort = 1 + _listener = None + logging._releaseLock() diff --git a/DJAGEN/trunk/djagen/gezegen/planet/compat_logging/handlers.py b/DJAGEN/trunk/djagen/gezegen/planet/compat_logging/handlers.py new file mode 100755 index 0000000..26ca8ad --- /dev/null +++ b/DJAGEN/trunk/djagen/gezegen/planet/compat_logging/handlers.py @@ -0,0 +1,728 @@ +# Copyright 2001-2002 by Vinay Sajip. All Rights Reserved. +# +# Permission to use, copy, modify, and distribute this software and its +# documentation for any purpose and without fee is hereby granted, +# provided that the above copyright notice appear in all copies and that +# both that copyright notice and this permission notice appear in +# supporting documentation, and that the name of Vinay Sajip +# not be used in advertising or publicity pertaining to distribution +# of the software without specific, written prior permission. +# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. 
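The protocol ConfigStreamHandler.handle() reads is a 4-byte big-endian length followed by the configuration text itself. A client-side sketch for pushing a new config to a process that has called listen().start(); logging.ini is again an illustrative name:

    import socket, struct

    def push_config(conf_text, host='localhost', port=9030):
        # 9030 is DEFAULT_LOGGING_CONFIG_PORT above
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((host, port))
        s.sendall(struct.pack(">L", len(conf_text)) + conf_text)
        s.close()

    push_config(open('logging.ini').read())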
IN NO EVENT SHALL +# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER +# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +""" +Logging package for Python. Based on PEP 282 and comments thereto in +comp.lang.python, and influenced by Apache's log4j system. + +Should work under Python versions >= 1.5.2, except that source line +information is not available unless 'inspect' is. + +Copyright (C) 2001-2002 Vinay Sajip. All Rights Reserved. + +To use, simply 'import logging' and log away! +""" + +import sys, logging, socket, types, os, string, cPickle, struct, time + +from SocketServer import ThreadingTCPServer, StreamRequestHandler + +# +# Some constants... +# + +DEFAULT_TCP_LOGGING_PORT = 9020 +DEFAULT_UDP_LOGGING_PORT = 9021 +DEFAULT_HTTP_LOGGING_PORT = 9022 +DEFAULT_SOAP_LOGGING_PORT = 9023 +SYSLOG_UDP_PORT = 514 + + +class RotatingFileHandler(logging.FileHandler): + def __init__(self, filename, mode="a", maxBytes=0, backupCount=0): + """ + Open the specified file and use it as the stream for logging. + + By default, the file grows indefinitely. You can specify particular + values of maxBytes and backupCount to allow the file to rollover at + a predetermined size. + + Rollover occurs whenever the current log file is nearly maxBytes in + length. If backupCount is >= 1, the system will successively create + new files with the same pathname as the base file, but with extensions + ".1", ".2" etc. appended to it. For example, with a backupCount of 5 + and a base file name of "app.log", you would get "app.log", + "app.log.1", "app.log.2", ... through to "app.log.5". The file being + written to is always "app.log" - when it gets filled up, it is closed + and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc. + exist, then they are renamed to "app.log.2", "app.log.3" etc. + respectively. + + If maxBytes is zero, rollover never occurs. + """ + logging.FileHandler.__init__(self, filename, mode) + self.maxBytes = maxBytes + self.backupCount = backupCount + if maxBytes > 0: + self.mode = "a" + + def doRollover(self): + """ + Do a rollover, as described in __init__(). + """ + + self.stream.close() + if self.backupCount > 0: + for i in range(self.backupCount - 1, 0, -1): + sfn = "%s.%d" % (self.baseFilename, i) + dfn = "%s.%d" % (self.baseFilename, i + 1) + if os.path.exists(sfn): + #print "%s -> %s" % (sfn, dfn) + if os.path.exists(dfn): + os.remove(dfn) + os.rename(sfn, dfn) + dfn = self.baseFilename + ".1" + if os.path.exists(dfn): + os.remove(dfn) + os.rename(self.baseFilename, dfn) + #print "%s -> %s" % (self.baseFilename, dfn) + self.stream = open(self.baseFilename, "w") + + def emit(self, record): + """ + Emit a record. + + Output the record to the file, catering for rollover as described + in doRollover(). + """ + if self.maxBytes > 0: # are we rolling over? + msg = "%s\n" % self.format(record) + self.stream.seek(0, 2) #due to non-posix-compliant Windows feature + if self.stream.tell() + len(msg) >= self.maxBytes: + self.doRollover() + logging.FileHandler.emit(self, record) + + +class SocketHandler(logging.Handler): + """ + A handler class which writes logging records, in pickle format, to + a streaming socket. The socket is kept open across logging calls. + If the peer resets it, an attempt is made to reconnect on the next call. 
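Wiring up the rollover behaviour documented in __init__() might look like the sketch below; the file name app.log is illustrative, and the imports assume this package is installed under the name logging, which is how handlers.py itself imports the core module:

    import logging, logging.handlers

    h = logging.handlers.RotatingFileHandler('app.log',
                                             maxBytes=100*1024,  # roll near 100 KB
                                             backupCount=5)      # keep app.log.1 .. .5
    h.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    logging.getLogger('planet').addHandler(h)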
+ The pickle which is sent is that of the LogRecord's attribute dictionary + (__dict__), so that the receiver does not need to have the logging module + installed in order to process the logging event. + + To unpickle the record at the receiving end into a LogRecord, use the + makeLogRecord function. + """ + + def __init__(self, host, port): + """ + Initializes the handler with a specific host address and port. + + The attribute 'closeOnError' is set to 1 - which means that if + a socket error occurs, the socket is silently closed and then + reopened on the next logging call. + """ + logging.Handler.__init__(self) + self.host = host + self.port = port + self.sock = None + self.closeOnError = 0 + + def makeSocket(self): + """ + A factory method which allows subclasses to define the precise + type of socket they want. + """ + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.connect((self.host, self.port)) + return s + + def send(self, s): + """ + Send a pickled string to the socket. + + This function allows for partial sends which can happen when the + network is busy. + """ + if hasattr(self.sock, "sendall"): + self.sock.sendall(s) + else: + sentsofar = 0 + left = len(s) + while left > 0: + sent = self.sock.send(s[sentsofar:]) + sentsofar = sentsofar + sent + left = left - sent + + def makePickle(self, record): + """ + Pickles the record in binary format with a length prefix, and + returns it ready for transmission across the socket. + """ + s = cPickle.dumps(record.__dict__, 1) + #n = len(s) + #slen = "%c%c" % ((n >> 8) & 0xFF, n & 0xFF) + slen = struct.pack(">L", len(s)) + return slen + s + + def handleError(self, record): + """ + Handle an error during logging. + + An error has occurred during logging. Most likely cause - + connection lost. Close the socket so that we can retry on the + next event. + """ + if self.closeOnError and self.sock: + self.sock.close() + self.sock = None #try to reconnect next time + else: + logging.Handler.handleError(self, record) + + def emit(self, record): + """ + Emit a record. + + Pickles the record and writes it to the socket in binary format. + If there is an error with the socket, silently drop the packet. + If there was a problem with the socket, re-establishes the + socket. + """ + try: + s = self.makePickle(record) + if not self.sock: + self.sock = self.makeSocket() + self.send(s) + except: + self.handleError(record) + + def close(self): + """ + Closes the socket. + """ + if self.sock: + self.sock.close() + self.sock = None + +class DatagramHandler(SocketHandler): + """ + A handler class which writes logging records, in pickle format, to + a datagram socket. The pickle which is sent is that of the LogRecord's + attribute dictionary (__dict__), so that the receiver does not need to + have the logging module installed in order to process the logging event. + + To unpickle the record at the receiving end into a LogRecord, use the + makeLogRecord function. + + """ + def __init__(self, host, port): + """ + Initializes the handler with a specific host address and port. + """ + SocketHandler.__init__(self, host, port) + self.closeOnError = 0 + + def makeSocket(self): + """ + The factory method of SocketHandler is here overridden to create + a UDP socket (SOCK_DGRAM). + """ + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + return s + + def send(self, s): + """ + Send a pickled string to a socket. 
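makePickle() frames every record as a 4-byte big-endian length plus a pickle of record.__dict__, so the receiver needs no logging package at all. A receiving-side sketch for one accepted connection object conn (a hypothetical name):

    import cPickle, struct

    def read_record_dict(conn):
        # Read one LogRecord attribute dict as framed by SocketHandler.
        header = conn.recv(4)
        if len(header) < 4:
            return None                     # peer closed the connection
        slen = struct.unpack(">L", header)[0]
        data = conn.recv(slen)
        while len(data) < slen:             # recv() may return short reads
            data = data + conn.recv(slen - len(data))
        return cPickle.loads(data)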
+ + This function no longer allows for partial sends which can happen + when the network is busy - UDP does not guarantee delivery and + can deliver packets out of sequence. + """ + self.sock.sendto(s, (self.host, self.port)) + +class SysLogHandler(logging.Handler): + """ + A handler class which sends formatted logging records to a syslog + server. Based on Sam Rushing's syslog module: + http://www.nightmare.com/squirl/python-ext/misc/syslog.py + Contributed by Nicolas Untz (after which minor refactoring changes + have been made). + """ + + # from : + # ====================================================================== + # priorities/facilities are encoded into a single 32-bit quantity, where + # the bottom 3 bits are the priority (0-7) and the top 28 bits are the + # facility (0-big number). Both the priorities and the facilities map + # roughly one-to-one to strings in the syslogd(8) source code. This + # mapping is included in this file. + # + # priorities (these are ordered) + + LOG_EMERG = 0 # system is unusable + LOG_ALERT = 1 # action must be taken immediately + LOG_CRIT = 2 # critical conditions + LOG_ERR = 3 # error conditions + LOG_WARNING = 4 # warning conditions + LOG_NOTICE = 5 # normal but significant condition + LOG_INFO = 6 # informational + LOG_DEBUG = 7 # debug-level messages + + # facility codes + LOG_KERN = 0 # kernel messages + LOG_USER = 1 # random user-level messages + LOG_MAIL = 2 # mail system + LOG_DAEMON = 3 # system daemons + LOG_AUTH = 4 # security/authorization messages + LOG_SYSLOG = 5 # messages generated internally by syslogd + LOG_LPR = 6 # line printer subsystem + LOG_NEWS = 7 # network news subsystem + LOG_UUCP = 8 # UUCP subsystem + LOG_CRON = 9 # clock daemon + LOG_AUTHPRIV = 10 # security/authorization messages (private) + + # other codes through 15 reserved for system use + LOG_LOCAL0 = 16 # reserved for local use + LOG_LOCAL1 = 17 # reserved for local use + LOG_LOCAL2 = 18 # reserved for local use + LOG_LOCAL3 = 19 # reserved for local use + LOG_LOCAL4 = 20 # reserved for local use + LOG_LOCAL5 = 21 # reserved for local use + LOG_LOCAL6 = 22 # reserved for local use + LOG_LOCAL7 = 23 # reserved for local use + + priority_names = { + "alert": LOG_ALERT, + "crit": LOG_CRIT, + "critical": LOG_CRIT, + "debug": LOG_DEBUG, + "emerg": LOG_EMERG, + "err": LOG_ERR, + "error": LOG_ERR, # DEPRECATED + "info": LOG_INFO, + "notice": LOG_NOTICE, + "panic": LOG_EMERG, # DEPRECATED + "warn": LOG_WARNING, # DEPRECATED + "warning": LOG_WARNING, + } + + facility_names = { + "auth": LOG_AUTH, + "authpriv": LOG_AUTHPRIV, + "cron": LOG_CRON, + "daemon": LOG_DAEMON, + "kern": LOG_KERN, + "lpr": LOG_LPR, + "mail": LOG_MAIL, + "news": LOG_NEWS, + "security": LOG_AUTH, # DEPRECATED + "syslog": LOG_SYSLOG, + "user": LOG_USER, + "uucp": LOG_UUCP, + "local0": LOG_LOCAL0, + "local1": LOG_LOCAL1, + "local2": LOG_LOCAL2, + "local3": LOG_LOCAL3, + "local4": LOG_LOCAL4, + "local5": LOG_LOCAL5, + "local6": LOG_LOCAL6, + "local7": LOG_LOCAL7, + } + + def __init__(self, address=('localhost', SYSLOG_UDP_PORT), facility=LOG_USER): + """ + Initialize a handler. + + If address is specified as a string, UNIX socket is used. + If facility is not specified, LOG_USER is used. 
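The facility and priority tables above are what encodePriority() (below) shifts together as (facility << 3) | priority, so ('user', 'warning') encodes to (1 << 3) | 4 == 12. Attaching a UDP syslog handler, under the same installed-as-logging assumption:

    import logging, logging.handlers

    syslog = logging.handlers.SysLogHandler(
        address=('localhost', logging.handlers.SYSLOG_UDP_PORT),   # UDP port 514
        facility=logging.handlers.SysLogHandler.LOG_DAEMON)
    logging.getLogger('planet').addHandler(syslog)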
+ """ + logging.Handler.__init__(self) + + self.address = address + self.facility = facility + if type(address) == types.StringType: + self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) + # syslog may require either DGRAM or STREAM sockets + try: + self.socket.connect(address) + except socket.error: + self.socket.close() + self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + self.socket.connect(address) + self.unixsocket = 1 + else: + self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + self.unixsocket = 0 + + self.formatter = None + + # curious: when talking to the unix-domain '/dev/log' socket, a + # zero-terminator seems to be required. this string is placed + # into a class variable so that it can be overridden if + # necessary. + log_format_string = '<%d>%s\000' + + def encodePriority (self, facility, priority): + """ + Encode the facility and priority. You can pass in strings or + integers - if strings are passed, the facility_names and + priority_names mapping dictionaries are used to convert them to + integers. + """ + if type(facility) == types.StringType: + facility = self.facility_names[facility] + if type(priority) == types.StringType: + priority = self.priority_names[priority] + return (facility << 3) | priority + + def close (self): + """ + Closes the socket. + """ + if self.unixsocket: + self.socket.close() + + def emit(self, record): + """ + Emit a record. + + The record is formatted, and then sent to the syslog server. If + exception information is present, it is NOT sent to the server. + """ + msg = self.format(record) + """ + We need to convert record level to lowercase, maybe this will + change in the future. + """ + msg = self.log_format_string % ( + self.encodePriority(self.facility, + string.lower(record.levelname)), + msg) + try: + if self.unixsocket: + self.socket.send(msg) + else: + self.socket.sendto(msg, self.address) + except: + self.handleError(record) + +class SMTPHandler(logging.Handler): + """ + A handler class which sends an SMTP email for each logging event. + """ + def __init__(self, mailhost, fromaddr, toaddrs, subject): + """ + Initialize the handler. + + Initialize the instance with the from and to addresses and subject + line of the email. To specify a non-standard SMTP port, use the + (host, port) tuple format for the mailhost argument. + """ + logging.Handler.__init__(self) + if type(mailhost) == types.TupleType: + host, port = mailhost + self.mailhost = host + self.mailport = port + else: + self.mailhost = mailhost + self.mailport = None + self.fromaddr = fromaddr + if type(toaddrs) == types.StringType: + toaddrs = [toaddrs] + self.toaddrs = toaddrs + self.subject = subject + + def getSubject(self, record): + """ + Determine the subject for the email. + + If you want to specify a subject line which is record-dependent, + override this method. + """ + return self.subject + + weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] + + monthname = [None, + 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', + 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] + + def date_time(self): + """Return the current date and time formatted for a MIME header.""" + year, month, day, hh, mm, ss, wd, y, z = time.gmtime(time.time()) + s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % ( + self.weekdayname[wd], + day, self.monthname[month], year, + hh, mm, ss) + return s + + def emit(self, record): + """ + Emit a record. + + Format the record and send it to the specified addressees. 
+ """ + try: + import smtplib + port = self.mailport + if not port: + port = smtplib.SMTP_PORT + smtp = smtplib.SMTP(self.mailhost, port) + msg = self.format(record) + msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % ( + self.fromaddr, + string.join(self.toaddrs, ","), + self.getSubject(record), + self.date_time(), msg) + smtp.sendmail(self.fromaddr, self.toaddrs, msg) + smtp.quit() + except: + self.handleError(record) + +class NTEventLogHandler(logging.Handler): + """ + A handler class which sends events to the NT Event Log. Adds a + registry entry for the specified application name. If no dllname is + provided, win32service.pyd (which contains some basic message + placeholders) is used. Note that use of these placeholders will make + your event logs big, as the entire message source is held in the log. + If you want slimmer logs, you have to pass in the name of your own DLL + which contains the message definitions you want to use in the event log. + """ + def __init__(self, appname, dllname=None, logtype="Application"): + logging.Handler.__init__(self) + try: + import win32evtlogutil, win32evtlog + self.appname = appname + self._welu = win32evtlogutil + if not dllname: + dllname = os.path.split(self._welu.__file__) + dllname = os.path.split(dllname[0]) + dllname = os.path.join(dllname[0], r'win32service.pyd') + self.dllname = dllname + self.logtype = logtype + self._welu.AddSourceToRegistry(appname, dllname, logtype) + self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE + self.typemap = { + logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE, + logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE, + logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE, + logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE, + logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE, + } + except ImportError: + print "The Python Win32 extensions for NT (service, event "\ + "logging) appear not to be available." + self._welu = None + + def getMessageID(self, record): + """ + Return the message ID for the event record. If you are using your + own messages, you could do this by having the msg passed to the + logger being an ID rather than a formatting string. Then, in here, + you could use a dictionary lookup to get the message ID. This + version returns 1, which is the base message ID in win32service.pyd. + """ + return 1 + + def getEventCategory(self, record): + """ + Return the event category for the record. + + Override this if you want to specify your own categories. This version + returns 0. + """ + return 0 + + def getEventType(self, record): + """ + Return the event type for the record. + + Override this if you want to specify your own types. This version does + a mapping using the handler's typemap attribute, which is set up in + __init__() to a dictionary which contains mappings for DEBUG, INFO, + WARNING, ERROR and CRITICAL. If you are using your own levels you will + either need to override this method or place a suitable dictionary in + the handler's typemap attribute. + """ + return self.typemap.get(record.levelno, self.deftype) + + def emit(self, record): + """ + Emit a record. + + Determine the message ID, event category and event type. Then + log the message in the NT event log. + """ + if self._welu: + try: + id = self.getMessageID(record) + cat = self.getEventCategory(record) + type = self.getEventType(record) + msg = self.format(record) + self._welu.ReportEvent(self.appname, id, cat, type, [msg]) + except: + self.handleError(record) + + def close(self): + """ + Clean up this handler. 
+ + You can remove the application name from the registry as a + source of event log entries. However, if you do this, you will + not be able to see the events as you intended in the Event Log + Viewer - it needs to be able to access the registry to get the + DLL name. + """ + #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype) + pass + +class HTTPHandler(logging.Handler): + """ + A class which sends records to a Web server, using either GET or + POST semantics. + """ + def __init__(self, host, url, method="GET"): + """ + Initialize the instance with the host, the request URL, and the method + ("GET" or "POST") + """ + logging.Handler.__init__(self) + method = string.upper(method) + if method not in ["GET", "POST"]: + raise ValueError, "method must be GET or POST" + self.host = host + self.url = url + self.method = method + + def mapLogRecord(self, record): + """ + Default implementation of mapping the log record into a dict + that is send as the CGI data. Overwrite in your class. + Contributed by Franz Glasner. + """ + return record.__dict__ + + def emit(self, record): + """ + Emit a record. + + Send the record to the Web server as an URL-encoded dictionary + """ + try: + import httplib, urllib + h = httplib.HTTP(self.host) + url = self.url + data = urllib.urlencode(self.mapLogRecord(record)) + if self.method == "GET": + if (string.find(url, '?') >= 0): + sep = '&' + else: + sep = '?' + url = url + "%c%s" % (sep, data) + h.putrequest(self.method, url) + if self.method == "POST": + h.putheader("Content-length", str(len(data))) + h.endheaders() + if self.method == "POST": + h.send(data) + h.getreply() #can't do anything with the result + except: + self.handleError(record) + +class BufferingHandler(logging.Handler): + """ + A handler class which buffers logging records in memory. Whenever each + record is added to the buffer, a check is made to see if the buffer should + be flushed. If it should, then flush() is expected to do what's needed. + """ + def __init__(self, capacity): + """ + Initialize the handler with the buffer size. + """ + logging.Handler.__init__(self) + self.capacity = capacity + self.buffer = [] + + def shouldFlush(self, record): + """ + Should the handler flush its buffer? + + Returns true if the buffer is up to capacity. This method can be + overridden to implement custom flushing strategies. + """ + return (len(self.buffer) >= self.capacity) + + def emit(self, record): + """ + Emit a record. + + Append the record. If shouldFlush() tells us to, call flush() to process + the buffer. + """ + self.buffer.append(record) + if self.shouldFlush(record): + self.flush() + + def flush(self): + """ + Override to implement custom flushing behaviour. + + This version just zaps the buffer to empty. + """ + self.buffer = [] + +class MemoryHandler(BufferingHandler): + """ + A handler class which buffers logging records in memory, periodically + flushing them to a target handler. Flushing occurs whenever the buffer + is full, or when an event of a certain severity or greater is seen. + """ + def __init__(self, capacity, flushLevel=logging.ERROR, target=None): + """ + Initialize the handler with the buffer size, the level at which + flushing should occur and an optional target. + + Note that without a target being set either here or via setTarget(), + a MemoryHandler is no use to anyone! 
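MemoryHandler (completed below) buffers records cheaply and only bothers its target when the buffer fills or a record at flushLevel or above arrives, which suits a "log everything, write on error" setup:

    import logging, logging.handlers

    target = logging.FileHandler('planet.log')          # file name illustrative
    buffered = logging.handlers.MemoryHandler(capacity=200,
                                              flushLevel=logging.ERROR,
                                              target=target)
    log = logging.getLogger('planet')
    log.setLevel(logging.DEBUG)
    log.addHandler(buffered)

    log.info("held in memory for now")
    log.error("flushes this record and the buffered ones to planet.log")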
+ """ + BufferingHandler.__init__(self, capacity) + self.flushLevel = flushLevel + self.target = target + + def shouldFlush(self, record): + """ + Check for buffer full or a record at the flushLevel or higher. + """ + return (len(self.buffer) >= self.capacity) or \ + (record.levelno >= self.flushLevel) + + def setTarget(self, target): + """ + Set the target handler for this handler. + """ + self.target = target + + def flush(self): + """ + For a MemoryHandler, flushing means just sending the buffered + records to the target, if there is one. Override if you want + different behaviour. + """ + if self.target: + for record in self.buffer: + self.target.handle(record) + self.buffer = [] + + def close(self): + """ + Flush, set the target to None and lose the buffer. + """ + self.flush() + self.target = None + self.buffer = [] diff --git a/DJAGEN/trunk/djagen/gezegen/planet/feedparser.py b/DJAGEN/trunk/djagen/gezegen/planet/feedparser.py new file mode 100755 index 0000000..615ee7e --- /dev/null +++ b/DJAGEN/trunk/djagen/gezegen/planet/feedparser.py @@ -0,0 +1,2931 @@ +#!/usr/bin/env python +"""Universal feed parser + +Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds + +Visit http://feedparser.org/ for the latest version +Visit http://feedparser.org/docs/ for the latest documentation + +Required: Python 2.1 or later +Recommended: Python 2.3 or later +Recommended: CJKCodecs and iconv_codec +""" + +__version__ = "4.1"# + "$Revision: 1.92 $"[11:15] + "-cvs" +__license__ = """Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS' +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE.""" +__author__ = "Mark Pilgrim " +__contributors__ = ["Jason Diamond ", + "John Beimler ", + "Fazal Majid ", + "Aaron Swartz ", + "Kevin Marks "] +_debug = 0 + +# HTTP "User-Agent" header to send to servers when downloading feeds. +# If you are embedding feedparser in a larger application, you should +# change this to your application name and URL. +USER_AGENT = "UniversalFeedParser/%s +http://feedparser.org/" % __version__ + +# HTTP "Accept" header to send to servers when downloading feeds. If you don't +# want to send an Accept header, set this to None. 
+ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1" + +# List of preferred XML parsers, by SAX driver name. These will be tried first, +# but if they're not installed, Python will keep searching through its own list +# of pre-installed parsers until it finds one that supports everything we need. +PREFERRED_XML_PARSERS = ["drv_libxml2"] + +# If you want feedparser to automatically run HTML markup through HTML Tidy, set +# this to 1. Requires mxTidy +# or utidylib . +TIDY_MARKUP = 0 + +# List of Python interfaces for HTML Tidy, in order of preference. Only useful +# if TIDY_MARKUP = 1 +PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"] + +# ---------- required modules (should come with any Python distribution) ---------- +import sgmllib, re, sys, copy, urlparse, time, rfc822, types, cgi, urllib, urllib2 +try: + from cStringIO import StringIO as _StringIO +except: + from StringIO import StringIO as _StringIO + +# ---------- optional modules (feedparser will work without these, but with reduced functionality) ---------- + +# gzip is included with most Python distributions, but may not be available if you compiled your own +try: + import gzip +except: + gzip = None +try: + import zlib +except: + zlib = None + +# If a real XML parser is available, feedparser will attempt to use it. feedparser has +# been tested with the built-in SAX parser, PyXML, and libxml2. On platforms where the +# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some +# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing. +try: + import xml.sax + xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers + from xml.sax.saxutils import escape as _xmlescape + _XML_AVAILABLE = 1 +except: + _XML_AVAILABLE = 0 + def _xmlescape(data,entities={}): + data = data.replace('&', '&') + data = data.replace('>', '>') + data = data.replace('<', '<') + for char, entity in entities: + data = data.replace(char, entity) + return data + +# base64 support for Atom feeds that contain embedded binary data +try: + import base64, binascii +except: + base64 = binascii = None + +# cjkcodecs and iconv_codec provide support for more character encodings. 
+# Both are available from http://cjkpython.i18n.org/ +try: + import cjkcodecs.aliases +except: + pass +try: + import iconv_codec +except: + pass + +# chardet library auto-detects character encodings +# Download from http://chardet.feedparser.org/ +try: + import chardet + if _debug: + import chardet.constants + chardet.constants._debug = 1 +except: + chardet = None + +# ---------- don't touch these ---------- +class ThingsNobodyCaresAboutButMe(Exception): pass +class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass +class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass +class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass +class UndeclaredNamespace(Exception): pass + +sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*') +sgmllib.special = re.compile('' % (tag, self.strattrs(attrs)), escape=0) + + # match namespaces + if tag.find(':') <> -1: + prefix, suffix = tag.split(':', 1) + else: + prefix, suffix = '', tag + prefix = self.namespacemap.get(prefix, prefix) + if prefix: + prefix = prefix + '_' + + # special hack for better tracking of empty textinput/image elements in illformed feeds + if (not prefix) and tag not in ('title', 'link', 'description', 'name'): + self.intextinput = 0 + if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'): + self.inimage = 0 + + # call special handler (if defined) or default handler + methodname = '_start_' + prefix + suffix + try: + method = getattr(self, methodname) + return method(attrsD) + except AttributeError: + return self.push(prefix + suffix, 1) + + def unknown_endtag(self, tag): + if _debug: sys.stderr.write('end %s\n' % tag) + # match namespaces + if tag.find(':') <> -1: + prefix, suffix = tag.split(':', 1) + else: + prefix, suffix = '', tag + prefix = self.namespacemap.get(prefix, prefix) + if prefix: + prefix = prefix + '_' + + # call special handler (if defined) or default handler + methodname = '_end_' + prefix + suffix + try: + method = getattr(self, methodname) + method() + except AttributeError: + self.pop(prefix + suffix) + + # track inline content + if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'): + # element declared itself as escaped markup, but it isn't really + self.contentparams['type'] = 'application/xhtml+xml' + if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml': + tag = tag.split(':')[-1] + self.handle_data('' % tag, escape=0) + + # track xml:base and xml:lang going out of scope + if self.basestack: + self.basestack.pop() + if self.basestack and self.basestack[-1]: + self.baseuri = self.basestack[-1] + if self.langstack: + self.langstack.pop() + if self.langstack: # and (self.langstack[-1] is not None): + self.lang = self.langstack[-1] + + def handle_charref(self, ref): + # called for each character reference, e.g. for ' ', ref will be '160' + if not self.elementstack: return + ref = ref.lower() + if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'): + text = '&#%s;' % ref + else: + if ref[0] == 'x': + c = int(ref[1:], 16) + else: + c = int(ref) + text = unichr(c).encode('utf-8') + self.elementstack[-1][2].append(text) + + def handle_entityref(self, ref): + # called for each entity reference, e.g. 
for '©', ref will be 'copy' + if not self.elementstack: return + if _debug: sys.stderr.write('entering handle_entityref with %s\n' % ref) + if ref in ('lt', 'gt', 'quot', 'amp', 'apos'): + text = '&%s;' % ref + else: + # entity resolution graciously donated by Aaron Swartz + def name2cp(k): + import htmlentitydefs + if hasattr(htmlentitydefs, 'name2codepoint'): # requires Python 2.3 + return htmlentitydefs.name2codepoint[k] + k = htmlentitydefs.entitydefs[k] + if k.startswith('&#') and k.endswith(';'): + return int(k[2:-1]) # not in latin-1 + return ord(k) + try: name2cp(ref) + except KeyError: text = '&%s;' % ref + else: text = unichr(name2cp(ref)).encode('utf-8') + self.elementstack[-1][2].append(text) + + def handle_data(self, text, escape=1): + # called for each block of plain text, i.e. outside of any tag and + # not containing any character or entity references + if not self.elementstack: return + if escape and self.contentparams.get('type') == 'application/xhtml+xml': + text = _xmlescape(text) + self.elementstack[-1][2].append(text) + + def handle_comment(self, text): + # called for each comment, e.g. + pass + + def handle_pi(self, text): + # called for each processing instruction, e.g. + pass + + def handle_decl(self, text): + pass + + def parse_declaration(self, i): + # override internal declaration handler to handle CDATA blocks + if _debug: sys.stderr.write('entering parse_declaration\n') + if self.rawdata[i:i+9] == '', i) + if k == -1: k = len(self.rawdata) + self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0) + return k+3 + else: + k = self.rawdata.find('>', i) + return k+1 + + def mapContentType(self, contentType): + contentType = contentType.lower() + if contentType == 'text': + contentType = 'text/plain' + elif contentType == 'html': + contentType = 'text/html' + elif contentType == 'xhtml': + contentType = 'application/xhtml+xml' + return contentType + + def trackNamespace(self, prefix, uri): + loweruri = uri.lower() + if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/') and not self.version: + self.version = 'rss090' + if loweruri == 'http://purl.org/rss/1.0/' and not self.version: + self.version = 'rss10' + if loweruri == 'http://www.w3.org/2005/atom' and not self.version: + self.version = 'atom10' + if loweruri.find('backend.userland.com/rss') <> -1: + # match any backend.userland.com namespace + uri = 'http://backend.userland.com/rss' + loweruri = uri + if self._matchnamespaces.has_key(loweruri): + self.namespacemap[prefix] = self._matchnamespaces[loweruri] + self.namespacesInUse[self._matchnamespaces[loweruri]] = uri + else: + self.namespacesInUse[prefix or ''] = uri + + def resolveURI(self, uri): + return _urljoin(self.baseuri or '', uri) + + def decodeEntities(self, element, data): + return data + + def strattrs(self, attrs): + return ''.join([' %s="%s"' % (t[0],_xmlescape(t[1],{'"':'"'})) for t in attrs]) + + def push(self, element, expectingText): + self.elementstack.append([element, expectingText, []]) + + def pop(self, element, stripWhitespace=1): + if not self.elementstack: return + if self.elementstack[-1][0] != element: return + + element, expectingText, pieces = self.elementstack.pop() + + if self.version == 'atom10' and self.contentparams.get('type','text') == 'application/xhtml+xml': + # remove enclosing child element, but only if it is a
<div> and
+            # only if all the remaining content is nested underneath it.
+            # This means that the divs would be retained in the following:
+            #    <div>foo</div><div>bar</div>
+            if pieces and (pieces[0] == '<div>' or pieces[0].startswith('<div ')):
+                depth = 0
+                for piece in pieces[:-1]:
+                    if piece.startswith('</'):
+                        depth -= 1
+                        if depth == 0:
+                            break
+                    elif piece.startswith('<') and not piece.endswith('/>'):
+                        depth += 1
+                else:
+                    pieces = pieces[1:-1]
+
+        output = ''.join(pieces)
+        if stripWhitespace:
+            output = output.strip()
+        if not expectingText: return output
+
+        # decode base64 content
+        if base64 and self.contentparams.get('base64', 0):
+            try:
+                output = base64.decodestring(output)
+            except binascii.Error:
+                pass
+            except binascii.Incomplete:
+                pass
+
+        # resolve relative URIs
+        if (element in self.can_be_relative_uri) and output:
+            output = self.resolveURI(output)
+
+        # decode entities within embedded markup
+        if not self.contentparams.get('base64', 0):
+            output = self.decodeEntities(element, output)
+
+        # remove temporary cruft from contentparams
+        try:
+            del self.contentparams['mode']
+        except KeyError:
+            pass
+        try:
+            del self.contentparams['base64']
+        except KeyError:
+            pass
+
+        # resolve relative URIs within embedded markup
+        if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types:
+            if element in self.can_contain_relative_uris:
+                output = _resolveRelativeURIs(output, self.baseuri, self.encoding)
+
+        # sanitize embedded markup
+        if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types:
+            if element in self.can_contain_dangerous_markup:
+                output = _sanitizeHTML(output, self.encoding)
+
+        if self.encoding and type(output) != type(u''):
+            try:
+                output = unicode(output, self.encoding)
+            except:
+                pass
+
+        # address common error where people take data that is already
+        # utf-8, presume that it is iso-8859-1, and re-encode it.
+        if self.encoding=='utf-8' and type(output) == type(u''):
+            try:
+                output = unicode(output.encode('iso-8859-1'), 'utf-8')
+            except:
+                pass
+
+        # map win-1252 extensions to the proper code points
+        if type(output) == type(u''):
+            output = u''.join([c in cp1252 and cp1252[c] or c for c in output])
+
+        # categories/tags/keywords/whatever are handled in _end_category
+        if element == 'category':
+            return output
+
+        # store output in appropriate place(s)
+        if self.inentry and not self.insource:
+            if element == 'content':
+                self.entries[-1].setdefault(element, [])
+                contentparams = copy.deepcopy(self.contentparams)
+                contentparams['value'] = output
+                self.entries[-1][element].append(contentparams)
+            elif element == 'link':
+                self.entries[-1][element] = output
+                if output:
+                    self.entries[-1]['links'][-1]['href'] = output
+            else:
+                if element == 'description':
+                    element = 'summary'
+                self.entries[-1][element] = output
+                if self.incontent:
+                    contentparams = copy.deepcopy(self.contentparams)
+                    contentparams['value'] = output
+                    self.entries[-1][element + '_detail'] = contentparams
+        elif (self.infeed or self.insource) and (not self.intextinput) and (not self.inimage):
+            context = self._getContext()
+            if element == 'description':
+                element = 'subtitle'
+            context[element] = output
+            if element == 'link':
+                context['links'][-1]['href'] = output
+            elif self.incontent:
+                contentparams = copy.deepcopy(self.contentparams)
+                contentparams['value'] = output
+                context[element + '_detail'] = contentparams
+        return output
+
+    def pushContent(self, tag, attrsD, defaultContentType, expectingText):
+        self.incontent += 1
+        self.contentparams = FeedParserDict({
+            'type': self.mapContentType(attrsD.get('type', defaultContentType)),
+            'language': self.lang,
+            'base': self.baseuri})
+        self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
+        self.push(tag, expectingText)
+
+    def popContent(self, tag):
+        value = self.pop(tag)
+        self.incontent
-= 1 + self.contentparams.clear() + return value + + def _mapToStandardPrefix(self, name): + colonpos = name.find(':') + if colonpos <> -1: + prefix = name[:colonpos] + suffix = name[colonpos+1:] + prefix = self.namespacemap.get(prefix, prefix) + name = prefix + ':' + suffix + return name + + def _getAttribute(self, attrsD, name): + return attrsD.get(self._mapToStandardPrefix(name)) + + def _isBase64(self, attrsD, contentparams): + if attrsD.get('mode', '') == 'base64': + return 1 + if self.contentparams['type'].startswith('text/'): + return 0 + if self.contentparams['type'].endswith('+xml'): + return 0 + if self.contentparams['type'].endswith('/xml'): + return 0 + return 1 + + def _itsAnHrefDamnIt(self, attrsD): + href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None))) + if href: + try: + del attrsD['url'] + except KeyError: + pass + try: + del attrsD['uri'] + except KeyError: + pass + attrsD['href'] = href + return attrsD + + def _save(self, key, value): + context = self._getContext() + context.setdefault(key, value) + + def _start_rss(self, attrsD): + versionmap = {'0.91': 'rss091u', + '0.92': 'rss092', + '0.93': 'rss093', + '0.94': 'rss094'} + if not self.version: + attr_version = attrsD.get('version', '') + version = versionmap.get(attr_version) + if version: + self.version = version + elif attr_version.startswith('2.'): + self.version = 'rss20' + else: + self.version = 'rss' + + def _start_dlhottitles(self, attrsD): + self.version = 'hotrss' + + def _start_channel(self, attrsD): + self.infeed = 1 + self._cdf_common(attrsD) + _start_feedinfo = _start_channel + + def _cdf_common(self, attrsD): + if attrsD.has_key('lastmod'): + self._start_modified({}) + self.elementstack[-1][-1] = attrsD['lastmod'] + self._end_modified() + if attrsD.has_key('href'): + self._start_link({}) + self.elementstack[-1][-1] = attrsD['href'] + self._end_link() + + def _start_feed(self, attrsD): + self.infeed = 1 + versionmap = {'0.1': 'atom01', + '0.2': 'atom02', + '0.3': 'atom03'} + if not self.version: + attr_version = attrsD.get('version') + version = versionmap.get(attr_version) + if version: + self.version = version + else: + self.version = 'atom' + + def _end_channel(self): + self.infeed = 0 + _end_feed = _end_channel + + def _start_image(self, attrsD): + self.inimage = 1 + self.push('image', 0) + context = self._getContext() + context.setdefault('image', FeedParserDict()) + + def _end_image(self): + self.pop('image') + self.inimage = 0 + + def _start_textinput(self, attrsD): + self.intextinput = 1 + self.push('textinput', 0) + context = self._getContext() + context.setdefault('textinput', FeedParserDict()) + _start_textInput = _start_textinput + + def _end_textinput(self): + self.pop('textinput') + self.intextinput = 0 + _end_textInput = _end_textinput + + def _start_author(self, attrsD): + self.inauthor = 1 + self.push('author', 1) + _start_managingeditor = _start_author + _start_dc_author = _start_author + _start_dc_creator = _start_author + _start_itunes_author = _start_author + + def _end_author(self): + self.pop('author') + self.inauthor = 0 + self._sync_author_detail() + _end_managingeditor = _end_author + _end_dc_author = _end_author + _end_dc_creator = _end_author + _end_itunes_author = _end_author + + def _start_itunes_owner(self, attrsD): + self.inpublisher = 1 + self.push('publisher', 0) + + def _end_itunes_owner(self): + self.pop('publisher') + self.inpublisher = 0 + self._sync_author_detail('publisher') + + def _start_contributor(self, attrsD): + self.incontributor = 1 + 
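# A minimal standalone sketch (Python 3, illustrative names, not part of the
# original patch) of the type-based heuristic that _isBase64 above encodes:
# content is assumed base64-encoded unless an explicit mode says so or its
# MIME type marks it as text or XML.
def looks_base64(mode, content_type):
    if mode == 'base64':
        return True
    if content_type.startswith('text/'):
        return False
    if content_type.endswith('+xml') or content_type.endswith('/xml'):
        return False
    return True

assert looks_base64('base64', 'text/plain')          # explicit mode wins
assert not looks_base64('', 'application/atom+xml')  # XML is left as-is
assert looks_base64('', 'image/png')                 # opaque binary decodes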
context = self._getContext() + context.setdefault('contributors', []) + context['contributors'].append(FeedParserDict()) + self.push('contributor', 0) + + def _end_contributor(self): + self.pop('contributor') + self.incontributor = 0 + + def _start_dc_contributor(self, attrsD): + self.incontributor = 1 + context = self._getContext() + context.setdefault('contributors', []) + context['contributors'].append(FeedParserDict()) + self.push('name', 0) + + def _end_dc_contributor(self): + self._end_name() + self.incontributor = 0 + + def _start_name(self, attrsD): + self.push('name', 0) + _start_itunes_name = _start_name + + def _end_name(self): + value = self.pop('name') + if self.inpublisher: + self._save_author('name', value, 'publisher') + elif self.inauthor: + self._save_author('name', value) + elif self.incontributor: + self._save_contributor('name', value) + elif self.intextinput: + context = self._getContext() + context['textinput']['name'] = value + _end_itunes_name = _end_name + + def _start_width(self, attrsD): + self.push('width', 0) + + def _end_width(self): + value = self.pop('width') + try: + value = int(value) + except: + value = 0 + if self.inimage: + context = self._getContext() + context['image']['width'] = value + + def _start_height(self, attrsD): + self.push('height', 0) + + def _end_height(self): + value = self.pop('height') + try: + value = int(value) + except: + value = 0 + if self.inimage: + context = self._getContext() + context['image']['height'] = value + + def _start_url(self, attrsD): + self.push('href', 1) + _start_homepage = _start_url + _start_uri = _start_url + + def _end_url(self): + value = self.pop('href') + if self.inauthor: + self._save_author('href', value) + elif self.incontributor: + self._save_contributor('href', value) + elif self.inimage: + context = self._getContext() + context['image']['href'] = value + elif self.intextinput: + context = self._getContext() + context['textinput']['link'] = value + _end_homepage = _end_url + _end_uri = _end_url + + def _start_email(self, attrsD): + self.push('email', 0) + _start_itunes_email = _start_email + + def _end_email(self): + value = self.pop('email') + if self.inpublisher: + self._save_author('email', value, 'publisher') + elif self.inauthor: + self._save_author('email', value) + elif self.incontributor: + self._save_contributor('email', value) + _end_itunes_email = _end_email + + def _getContext(self): + if self.insource: + context = self.sourcedata + elif self.inentry: + context = self.entries[-1] + else: + context = self.feeddata + return context + + def _save_author(self, key, value, prefix='author'): + context = self._getContext() + context.setdefault(prefix + '_detail', FeedParserDict()) + context[prefix + '_detail'][key] = value + self._sync_author_detail() + + def _save_contributor(self, key, value): + context = self._getContext() + context.setdefault('contributors', [FeedParserDict()]) + context['contributors'][-1][key] = value + + def _sync_author_detail(self, key='author'): + context = self._getContext() + detail = context.get('%s_detail' % key) + if detail: + name = detail.get('name') + email = detail.get('email') + if name and email: + context[key] = '%s (%s)' % (name, email) + elif name: + context[key] = name + elif email: + context[key] = email + else: + author = context.get(key) + if not author: return + emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))''', author) + if not emailmatch: return + 
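# A standalone distillation (Python 3, simplified regex, not part of the
# original patch) of the step completed just below: extract the e-mail
# address from a combined author string and keep the remainder as the name.
import re

def split_author(author):
    m = re.search(r'[\w.+-]+@[\w-]+(?:\.[\w-]+)+', author)
    if not m:
        return author.strip(), None
    email = m.group(0)
    name = author.replace(email, '').replace('()', '').strip(' ()')
    return (name or None), email

print(split_author('Mark Pilgrim (mark@example.org)'))
# -> ('Mark Pilgrim', 'mark@example.org')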
email = emailmatch.group(0) + # probably a better way to do the following, but it passes all the tests + author = author.replace(email, '') + author = author.replace('()', '') + author = author.strip() + if author and (author[0] == '('): + author = author[1:] + if author and (author[-1] == ')'): + author = author[:-1] + author = author.strip() + context.setdefault('%s_detail' % key, FeedParserDict()) + context['%s_detail' % key]['name'] = author + context['%s_detail' % key]['email'] = email + + def _start_subtitle(self, attrsD): + self.pushContent('subtitle', attrsD, 'text/plain', 1) + _start_tagline = _start_subtitle + _start_itunes_subtitle = _start_subtitle + + def _end_subtitle(self): + self.popContent('subtitle') + _end_tagline = _end_subtitle + _end_itunes_subtitle = _end_subtitle + + def _start_rights(self, attrsD): + self.pushContent('rights', attrsD, 'text/plain', 1) + _start_dc_rights = _start_rights + _start_copyright = _start_rights + + def _end_rights(self): + self.popContent('rights') + _end_dc_rights = _end_rights + _end_copyright = _end_rights + + def _start_item(self, attrsD): + self.entries.append(FeedParserDict()) + self.push('item', 0) + self.inentry = 1 + self.guidislink = 0 + id = self._getAttribute(attrsD, 'rdf:about') + if id: + context = self._getContext() + context['id'] = id + self._cdf_common(attrsD) + _start_entry = _start_item + _start_product = _start_item + + def _end_item(self): + self.pop('item') + self.inentry = 0 + _end_entry = _end_item + + def _start_dc_language(self, attrsD): + self.push('language', 1) + _start_language = _start_dc_language + + def _end_dc_language(self): + self.lang = self.pop('language') + _end_language = _end_dc_language + + def _start_dc_publisher(self, attrsD): + self.push('publisher', 1) + _start_webmaster = _start_dc_publisher + + def _end_dc_publisher(self): + self.pop('publisher') + self._sync_author_detail('publisher') + _end_webmaster = _end_dc_publisher + + def _start_published(self, attrsD): + self.push('published', 1) + _start_dcterms_issued = _start_published + _start_issued = _start_published + + def _end_published(self): + value = self.pop('published') + self._save('published_parsed', _parse_date(value)) + _end_dcterms_issued = _end_published + _end_issued = _end_published + + def _start_updated(self, attrsD): + self.push('updated', 1) + _start_modified = _start_updated + _start_dcterms_modified = _start_updated + _start_pubdate = _start_updated + _start_dc_date = _start_updated + + def _end_updated(self): + value = self.pop('updated') + parsed_value = _parse_date(value) + self._save('updated_parsed', parsed_value) + _end_modified = _end_updated + _end_dcterms_modified = _end_updated + _end_pubdate = _end_updated + _end_dc_date = _end_updated + + def _start_created(self, attrsD): + self.push('created', 1) + _start_dcterms_created = _start_created + + def _end_created(self): + value = self.pop('created') + self._save('created_parsed', _parse_date(value)) + _end_dcterms_created = _end_created + + def _start_expirationdate(self, attrsD): + self.push('expired', 1) + + def _end_expirationdate(self): + self._save('expired_parsed', _parse_date(self.pop('expired'))) + + def _start_cc_license(self, attrsD): + self.push('license', 1) + value = self._getAttribute(attrsD, 'rdf:resource') + if value: + self.elementstack[-1][2].append(value) + self.pop('license') + + def _start_creativecommons_license(self, attrsD): + self.push('license', 1) + + def _end_creativecommons_license(self): + self.pop('license') + + def _addTag(self, 
term, scheme, label): + context = self._getContext() + tags = context.setdefault('tags', []) + if (not term) and (not scheme) and (not label): return + value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label}) + if value not in tags: + tags.append(FeedParserDict({'term': term, 'scheme': scheme, 'label': label})) + + def _start_category(self, attrsD): + if _debug: sys.stderr.write('entering _start_category with %s\n' % repr(attrsD)) + term = attrsD.get('term') + scheme = attrsD.get('scheme', attrsD.get('domain')) + label = attrsD.get('label') + self._addTag(term, scheme, label) + self.push('category', 1) + _start_dc_subject = _start_category + _start_keywords = _start_category + + def _end_itunes_keywords(self): + for term in self.pop('itunes_keywords').split(): + self._addTag(term, 'http://www.itunes.com/', None) + + def _start_itunes_category(self, attrsD): + self._addTag(attrsD.get('text'), 'http://www.itunes.com/', None) + self.push('category', 1) + + def _end_category(self): + value = self.pop('category') + if not value: return + context = self._getContext() + tags = context['tags'] + if value and len(tags) and not tags[-1]['term']: + tags[-1]['term'] = value + else: + self._addTag(value, None, None) + _end_dc_subject = _end_category + _end_keywords = _end_category + _end_itunes_category = _end_category + + def _start_cloud(self, attrsD): + self._getContext()['cloud'] = FeedParserDict(attrsD) + + def _start_link(self, attrsD): + attrsD.setdefault('rel', 'alternate') + attrsD.setdefault('type', 'text/html') + attrsD = self._itsAnHrefDamnIt(attrsD) + if attrsD.has_key('href'): + attrsD['href'] = self.resolveURI(attrsD['href']) + expectingText = self.infeed or self.inentry or self.insource + context = self._getContext() + context.setdefault('links', []) + context['links'].append(FeedParserDict(attrsD)) + if attrsD['rel'] == 'enclosure': + self._start_enclosure(attrsD) + if attrsD.has_key('href'): + expectingText = 0 + if (attrsD.get('rel') == 'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types): + context['link'] = attrsD['href'] + else: + self.push('link', expectingText) + _start_producturl = _start_link + + def _end_link(self): + value = self.pop('link') + context = self._getContext() + if self.intextinput: + context['textinput']['link'] = value + if self.inimage: + context['image']['link'] = value + _end_producturl = _end_link + + def _start_guid(self, attrsD): + self.guidislink = (attrsD.get('ispermalink', 'true') == 'true') + self.push('id', 1) + + def _end_guid(self): + value = self.pop('id') + self._save('guidislink', self.guidislink and not self._getContext().has_key('link')) + if self.guidislink: + # guid acts as link, but only if 'ispermalink' is not present or is 'true', + # and only if the item doesn't already have a link element + self._save('link', value) + + def _start_title(self, attrsD): + self.pushContent('title', attrsD, 'text/plain', self.infeed or self.inentry or self.insource) + _start_dc_title = _start_title + _start_media_title = _start_title + + def _end_title(self): + value = self.popContent('title') + context = self._getContext() + if self.intextinput: + context['textinput']['title'] = value + elif self.inimage: + context['image']['title'] = value + _end_dc_title = _end_title + _end_media_title = _end_title + + def _start_description(self, attrsD): + context = self._getContext() + if context.has_key('summary'): + self._summaryKey = 'content' + self._start_content(attrsD) + else: + self.pushContent('description', attrsD, 
'text/html', self.infeed or self.inentry or self.insource) + + def _start_abstract(self, attrsD): + self.pushContent('description', attrsD, 'text/plain', self.infeed or self.inentry or self.insource) + + def _end_description(self): + if self._summaryKey == 'content': + self._end_content() + else: + value = self.popContent('description') + context = self._getContext() + if self.intextinput: + context['textinput']['description'] = value + elif self.inimage: + context['image']['description'] = value + self._summaryKey = None + _end_abstract = _end_description + + def _start_info(self, attrsD): + self.pushContent('info', attrsD, 'text/plain', 1) + _start_feedburner_browserfriendly = _start_info + + def _end_info(self): + self.popContent('info') + _end_feedburner_browserfriendly = _end_info + + def _start_generator(self, attrsD): + if attrsD: + attrsD = self._itsAnHrefDamnIt(attrsD) + if attrsD.has_key('href'): + attrsD['href'] = self.resolveURI(attrsD['href']) + self._getContext()['generator_detail'] = FeedParserDict(attrsD) + self.push('generator', 1) + + def _end_generator(self): + value = self.pop('generator') + context = self._getContext() + if context.has_key('generator_detail'): + context['generator_detail']['name'] = value + + def _start_admin_generatoragent(self, attrsD): + self.push('generator', 1) + value = self._getAttribute(attrsD, 'rdf:resource') + if value: + self.elementstack[-1][2].append(value) + self.pop('generator') + self._getContext()['generator_detail'] = FeedParserDict({'href': value}) + + def _start_admin_errorreportsto(self, attrsD): + self.push('errorreportsto', 1) + value = self._getAttribute(attrsD, 'rdf:resource') + if value: + self.elementstack[-1][2].append(value) + self.pop('errorreportsto') + + def _start_summary(self, attrsD): + context = self._getContext() + if context.has_key('summary'): + self._summaryKey = 'content' + self._start_content(attrsD) + else: + self._summaryKey = 'summary' + self.pushContent(self._summaryKey, attrsD, 'text/plain', 1) + _start_itunes_summary = _start_summary + + def _end_summary(self): + if self._summaryKey == 'content': + self._end_content() + else: + self.popContent(self._summaryKey or 'summary') + self._summaryKey = None + _end_itunes_summary = _end_summary + + def _start_enclosure(self, attrsD): + attrsD = self._itsAnHrefDamnIt(attrsD) + self._getContext().setdefault('enclosures', []).append(FeedParserDict(attrsD)) + href = attrsD.get('href') + if href: + context = self._getContext() + if not context.get('id'): + context['id'] = href + + def _start_source(self, attrsD): + self.insource = 1 + + def _end_source(self): + self.insource = 0 + self._getContext()['source'] = copy.deepcopy(self.sourcedata) + self.sourcedata.clear() + + def _start_content(self, attrsD): + self.pushContent('content', attrsD, 'text/plain', 1) + src = attrsD.get('src') + if src: + self.contentparams['src'] = src + self.push('content', 1) + + def _start_prodlink(self, attrsD): + self.pushContent('content', attrsD, 'text/html', 1) + + def _start_body(self, attrsD): + self.pushContent('content', attrsD, 'application/xhtml+xml', 1) + _start_xhtml_body = _start_body + + def _start_content_encoded(self, attrsD): + self.pushContent('content', attrsD, 'text/html', 1) + _start_fullitem = _start_content_encoded + + def _end_content(self): + copyToDescription = self.mapContentType(self.contentparams.get('type')) in (['text/plain'] + self.html_types) + value = self.popContent('content') + if copyToDescription: + self._save('description', value) + _end_body = 
_end_content + _end_xhtml_body = _end_content + _end_content_encoded = _end_content + _end_fullitem = _end_content + _end_prodlink = _end_content + + def _start_itunes_image(self, attrsD): + self.push('itunes_image', 0) + self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')}) + _start_itunes_link = _start_itunes_image + + def _end_itunes_block(self): + value = self.pop('itunes_block', 0) + self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0 + + def _end_itunes_explicit(self): + value = self.pop('itunes_explicit', 0) + self._getContext()['itunes_explicit'] = (value == 'yes') and 1 or 0 + +if _XML_AVAILABLE: + class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler): + def __init__(self, baseuri, baselang, encoding): + if _debug: sys.stderr.write('trying StrictFeedParser\n') + xml.sax.handler.ContentHandler.__init__(self) + _FeedParserMixin.__init__(self, baseuri, baselang, encoding) + self.bozo = 0 + self.exc = None + + def startPrefixMapping(self, prefix, uri): + self.trackNamespace(prefix, uri) + + def startElementNS(self, name, qname, attrs): + namespace, localname = name + lowernamespace = str(namespace or '').lower() + if lowernamespace.find('backend.userland.com/rss') <> -1: + # match any backend.userland.com namespace + namespace = 'http://backend.userland.com/rss' + lowernamespace = namespace + if qname and qname.find(':') > 0: + givenprefix = qname.split(':')[0] + else: + givenprefix = None + prefix = self._matchnamespaces.get(lowernamespace, givenprefix) + if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and not self.namespacesInUse.has_key(givenprefix): + raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix + if prefix: + localname = prefix + ':' + localname + localname = str(localname).lower() + if _debug: sys.stderr.write('startElementNS: qname = %s, namespace = %s, givenprefix = %s, prefix = %s, attrs = %s, localname = %s\n' % (qname, namespace, givenprefix, prefix, attrs.items(), localname)) + + # qname implementation is horribly broken in Python 2.1 (it + # doesn't report any), and slightly broken in Python 2.2 (it + # doesn't report the xml: namespace). So we match up namespaces + # with a known list first, and then possibly override them with + # the qnames the SAX parser gives us (if indeed it gives us any + # at all). Thanks to MatejC for helping me test this and + # tirelessly telling me that it didn't work yet. 
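# An editor's aside, not part of the original patch: the strict path here
# rides on xml.sax with namespace processing enabled, so startElementNS and
# endElementNS receive (namespace, localname) tuples exactly as handled in
# this class. A minimal namespace-aware handler for comparison (Python 3,
# illustrative feed string and handler):
import xml.sax, xml.sax.handler
from io import BytesIO

class TitleHandler(xml.sax.handler.ContentHandler):
    def __init__(self):
        super().__init__()
        self.titles, self._buf, self._in_title = [], [], False
    def startElementNS(self, name, qname, attrs):
        self._in_title = (name[1] == 'title')
    def characters(self, text):
        if self._in_title:
            self._buf.append(text)
    def endElementNS(self, name, qname):
        if name[1] == 'title':
            self.titles.append(''.join(self._buf))
            self._buf, self._in_title = [], False

parser = xml.sax.make_parser()
parser.setFeature(xml.sax.handler.feature_namespaces, True)
handler = TitleHandler()
parser.setContentHandler(handler)
parser.parse(BytesIO(b'<feed xmlns="http://www.w3.org/2005/Atom"><title>t</title></feed>'))
print(handler.titles)  # -> ['t']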
+        attrsD = {}
+        for (namespace, attrlocalname), attrvalue in attrs._attrs.items():
+            lowernamespace = (namespace or '').lower()
+            prefix = self._matchnamespaces.get(lowernamespace, '')
+            if prefix:
+                attrlocalname = prefix + ':' + attrlocalname
+            attrsD[str(attrlocalname).lower()] = attrvalue
+        for qname in attrs.getQNames():
+            attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
+        self.unknown_starttag(localname, attrsD.items())
+
+    def characters(self, text):
+        self.handle_data(text)
+
+    def endElementNS(self, name, qname):
+        namespace, localname = name
+        lowernamespace = str(namespace or '').lower()
+        if qname and qname.find(':') > 0:
+            givenprefix = qname.split(':')[0]
+        else:
+            givenprefix = ''
+        prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
+        if prefix:
+            localname = prefix + ':' + localname
+        localname = str(localname).lower()
+        self.unknown_endtag(localname)
+
+    def error(self, exc):
+        self.bozo = 1
+        self.exc = exc
+
+    def fatalError(self, exc):
+        self.error(exc)
+        raise exc
+
+class _BaseHTMLProcessor(sgmllib.SGMLParser):
+    elements_no_end_tag = ['area', 'base', 'basefont', 'br', 'col', 'frame', 'hr',
+      'img', 'input', 'isindex', 'link', 'meta', 'param']
+
+    def __init__(self, encoding):
+        self.encoding = encoding
+        if _debug: sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding)
+        sgmllib.SGMLParser.__init__(self)
+
+    def reset(self):
+        self.pieces = []
+        sgmllib.SGMLParser.reset(self)
+
+    def _shorttag_replace(self, match):
+        tag = match.group(1)
+        if tag in self.elements_no_end_tag:
+            return '<' + tag + ' />'
+        else:
+            return '<' + tag + '></' + tag + '>'
+
+    def feed(self, data):
+        data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
+        #data = re.sub(r'<(\S+?)\s*?/>', self._shorttag_replace, data) # bug [ 1399464 ] Bad regexp for _shorttag_replace
+        data = re.sub(r'<([^<\s]+?)\s*/>', self._shorttag_replace, data)
+        data = data.replace('&#39;', "'")
+        data = data.replace('&#34;', '"')
+        if self.encoding and type(data) == type(u''):
+            data = data.encode(self.encoding)
+        sgmllib.SGMLParser.feed(self, data)
+        sgmllib.SGMLParser.close(self)
+
+    def normalize_attrs(self, attrs):
+        # utility method to be called by descendants
+        attrs = [(k.lower(), v) for k, v in attrs]
+        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
+        return attrs
+
+    def unknown_starttag(self, tag, attrs):
+        # called for each start tag
+        # attrs is a list of (attr, value) tuples
+        # e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
    +        if _debug: sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag)
    +        uattrs = []
    +        # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
    +        for key, value in attrs:
    +            if type(value) != type(u''):
    +                value = unicode(value, self.encoding)
    +            uattrs.append((unicode(key, self.encoding), value))
    +        strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs]).encode(self.encoding)
    +        if tag in self.elements_no_end_tag:
    +            self.pieces.append('<%(tag)s%(strattrs)s />' % locals())
    +        else:
    +            self.pieces.append('<%(tag)s%(strattrs)s>' % locals())
    +
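# A minimal sketch (Python 3, abbreviated element list, not part of the
# original patch) of what unknown_starttag above and unknown_endtag below
# boil down to: re-serializing each parsed tag, self-closing the elements
# that take no end tag.
VOID_ELEMENTS = {'area', 'base', 'br', 'hr', 'img', 'input', 'link', 'meta'}

def rebuild_starttag(tag, attrs):
    strattrs = ''.join(' %s="%s"' % (k, v) for k, v in attrs)
    if tag in VOID_ELEMENTS:
        return '<%s%s />' % (tag, strattrs)
    return '<%s%s>' % (tag, strattrs)

print(rebuild_starttag('img', [('src', 'logo.png'), ('alt', 'logo')]))
# -> <img src="logo.png" alt="logo" />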
    +    def unknown_endtag(self, tag):
+        # called for each end tag, e.g. for </pre>, tag will be 'pre'
+        # Reconstruct the original end tag.
+        if tag not in self.elements_no_end_tag:
+            self.pieces.append("</%(tag)s>" % locals())
+
+    def handle_charref(self, ref):
+        # called for each character reference, e.g. for '&#160;', ref will be '160'
+        # Reconstruct the original character reference.
+        self.pieces.append('&#%(ref)s;' % locals())
+
+    def handle_entityref(self, ref):
+        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
+        # Reconstruct the original entity reference.
+        import htmlentitydefs
+        if not hasattr(htmlentitydefs, 'name2codepoint') or htmlentitydefs.name2codepoint.has_key(ref):
+            self.pieces.append('&%(ref)s;' % locals())
+        else:
+            self.pieces.append('&%(ref)s' % locals())
+
+    def handle_data(self, text):
+        # called for each block of plain text, i.e. outside of any tag and
+        # not containing any character or entity references
+        # Store the original text verbatim.
+        if _debug: sys.stderr.write('_BaseHTMLProcessor, handle_text, text=%s\n' % text)
+        self.pieces.append(text)
+
+    def handle_comment(self, text):
+        # called for each HTML comment, e.g. <!-- insert Python code here -->
+        # Reconstruct the original comment.
+        self.pieces.append('<!--%(text)s-->' % locals())
+
+    def handle_pi(self, text):
+        # called for each processing instruction, e.g. <?instruction>
+        # Reconstruct original processing instruction.
+        self.pieces.append('<?%(text)s>' % locals())
+
+    def handle_decl(self, text):
+        # called for the DOCTYPE, if present, e.g.
+        # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
+        #     "http://www.w3.org/TR/html4/loose.dtd">
+        # Reconstruct original DOCTYPE
+        self.pieces.append('<!%(text)s>' % locals())
+
+    _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
+    def _scan_name(self, i, declstartpos):
+        rawdata = self.rawdata
+        n = len(rawdata)
+        if i == n:
+            return None, -1
+        m = self._new_declname_match(rawdata, i)
+        if m:
+            s = m.group()
+            name = s.strip()
+            if (i + len(s)) == n:
+                return None, -1 # end of buffer
+            return name.lower(), m.end()
+        else:
+            self.handle_data(rawdata)
+#            self.updatepos(declstartpos, i)
+            return None, -1
+
+    def output(self):
+        '''Return processed HTML as a single string'''
+        return ''.join([str(p) for p in self.pieces])
+
+class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
+    def __init__(self, baseuri, baselang, encoding):
+        sgmllib.SGMLParser.__init__(self)
+        _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
+
+    def decodeEntities(self, element, data):
+        data = data.replace('&#60;', '&lt;')
+        data = data.replace('&#x3c;', '&lt;')
+        data = data.replace('&#x3C;', '&lt;')
+        data = data.replace('&#62;', '&gt;')
+        data = data.replace('&#x3e;', '&gt;')
+        data = data.replace('&#x3E;', '&gt;')
+        data = data.replace('&#38;', '&amp;')
+        data = data.replace('&#x26;', '&amp;')
+        data = data.replace('&#34;', '&quot;')
+        data = data.replace('&#x22;', '&quot;')
+        data = data.replace('&#39;', '&apos;')
+        data = data.replace('&#x27;', '&apos;')
+        if self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
+            data = data.replace('&lt;', '<')
+            data = data.replace('&gt;', '>')
+            data = data.replace('&amp;', '&')
+            data = data.replace('&quot;', '"')
+            data = data.replace('&apos;', "'")
+        return data
+
+    def strattrs(self, attrs):
+        return ''.join([' %s="%s"' % t for t in attrs])
+
+class _RelativeURIResolver(_BaseHTMLProcessor):
+    relative_uris = [('a', 'href'),
+                     ('applet', 'codebase'),
+                     ('area', 'href'),
+                     ('blockquote', 'cite'),
+                     ('body', 'background'),
+                     ('del', 'cite'),
+                     ('form', 'action'),
+                     ('frame', 'longdesc'),
+                     ('frame', 'src'),
+                     ('iframe', 'longdesc'),
+                     ('iframe', 'src'),
+                     ('head', 'profile'),
+                     ('img', 'longdesc'),
+                     ('img', 'src'),
+                     ('img', 'usemap'),
+                     ('input', 'src'),
+                     ('input', 'usemap'),
+                     ('ins', 'cite'),
+                     ('link', 'href'),
('object', 'classid'), + ('object', 'codebase'), + ('object', 'data'), + ('object', 'usemap'), + ('q', 'cite'), + ('script', 'src')] + + def __init__(self, baseuri, encoding): + _BaseHTMLProcessor.__init__(self, encoding) + self.baseuri = baseuri + + def resolveURI(self, uri): + return _urljoin(self.baseuri, uri) + + def unknown_starttag(self, tag, attrs): + attrs = self.normalize_attrs(attrs) + attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs] + _BaseHTMLProcessor.unknown_starttag(self, tag, attrs) + +def _resolveRelativeURIs(htmlSource, baseURI, encoding): + if _debug: sys.stderr.write('entering _resolveRelativeURIs\n') + p = _RelativeURIResolver(baseURI, encoding) + p.feed(htmlSource) + return p.output() + +class _HTMLSanitizer(_BaseHTMLProcessor): + acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'b', 'big', + 'blockquote', 'br', 'button', 'caption', 'center', 'cite', 'code', 'col', + 'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt', 'em', 'fieldset', + 'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', + 'ins', 'kbd', 'label', 'legend', 'li', 'map', 'menu', 'ol', 'optgroup', + 'option', 'p', 'pre', 'q', 's', 'samp', 'select', 'small', 'span', 'strike', + 'strong', 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'tfoot', 'th', + 'thead', 'tr', 'tt', 'u', 'ul', 'var'] + + acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey', + 'action', 'align', 'alt', 'axis', 'border', 'cellpadding', 'cellspacing', + 'char', 'charoff', 'charset', 'checked', 'cite', 'class', 'clear', 'cols', + 'colspan', 'color', 'compact', 'coords', 'datetime', 'dir', 'disabled', + 'enctype', 'for', 'frame', 'headers', 'height', 'href', 'hreflang', 'hspace', + 'id', 'ismap', 'label', 'lang', 'longdesc', 'maxlength', 'media', 'method', + 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'prompt', 'readonly', + 'rel', 'rev', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', + 'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title', 'type', + 'usemap', 'valign', 'value', 'vspace', 'width', 'xml:lang'] + + unacceptable_elements_with_end_tag = ['script', 'applet'] + + def reset(self): + _BaseHTMLProcessor.reset(self) + self.unacceptablestack = 0 + + def unknown_starttag(self, tag, attrs): + if not tag in self.acceptable_elements: + if tag in self.unacceptable_elements_with_end_tag: + self.unacceptablestack += 1 + return + attrs = self.normalize_attrs(attrs) + attrs = [(key, value) for key, value in attrs if key in self.acceptable_attributes] + _BaseHTMLProcessor.unknown_starttag(self, tag, attrs) + + def unknown_endtag(self, tag): + if not tag in self.acceptable_elements: + if tag in self.unacceptable_elements_with_end_tag: + self.unacceptablestack -= 1 + return + _BaseHTMLProcessor.unknown_endtag(self, tag) + + def handle_pi(self, text): + pass + + def handle_decl(self, text): + pass + + def handle_data(self, text): + if not self.unacceptablestack: + _BaseHTMLProcessor.handle_data(self, text) + +def _sanitizeHTML(htmlSource, encoding): + p = _HTMLSanitizer(encoding) + p.feed(htmlSource) + data = p.output() + if TIDY_MARKUP: + # loop through list of preferred Tidy interfaces looking for one that's installed, + # then set up a common _tidy function to wrap the interface-specific API. 
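# The same whitelist strategy as _HTMLSanitizer above, sketched on Python 3's
# html.parser (illustrative and abbreviated, not part of the original patch):
# unknown tags and attributes are dropped, and text inside blocked containers
# is suppressed with a depth counter, mirroring unacceptablestack.
from html.parser import HTMLParser

ALLOWED_TAGS = {'a', 'b', 'em', 'i', 'p', 'strong'}
ALLOWED_ATTRS = {'href', 'title'}
BLOCKED_TAGS = {'applet', 'script'}  # contents removed entirely

class Sanitizer(HTMLParser):
    def __init__(self):
        super().__init__(convert_charrefs=True)
        self.out, self.blocked_depth = [], 0
    def handle_starttag(self, tag, attrs):
        if tag in BLOCKED_TAGS:
            self.blocked_depth += 1
        elif tag in ALLOWED_TAGS:
            kept = ''.join(' %s="%s"' % (k, v) for k, v in attrs if k in ALLOWED_ATTRS)
            self.out.append('<%s%s>' % (tag, kept))
    def handle_endtag(self, tag):
        if tag in BLOCKED_TAGS:
            self.blocked_depth = max(0, self.blocked_depth - 1)
        elif tag in ALLOWED_TAGS:
            self.out.append('</%s>' % tag)
    def handle_data(self, data):
        if not self.blocked_depth:
            self.out.append(data)

s = Sanitizer()
s.feed('<p onclick="evil()">hi<script>alert(1)</script></p>')
s.close()
print(''.join(s.out))  # -> <p>hi</p>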
+ _tidy = None + for tidy_interface in PREFERRED_TIDY_INTERFACES: + try: + if tidy_interface == "uTidy": + from tidy import parseString as _utidy + def _tidy(data, **kwargs): + return str(_utidy(data, **kwargs)) + break + elif tidy_interface == "mxTidy": + from mx.Tidy import Tidy as _mxtidy + def _tidy(data, **kwargs): + nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs) + return data + break + except: + pass + if _tidy: + utf8 = type(data) == type(u'') + if utf8: + data = data.encode('utf-8') + data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8") + if utf8: + data = unicode(data, 'utf-8') + if data.count(''): + data = data.split('>', 1)[1] + if data.count('= '2.3.3' + assert base64 != None + user, passw = base64.decodestring(req.headers['Authorization'].split(' ')[1]).split(':') + realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0] + self.add_password(realm, host, user, passw) + retry = self.http_error_auth_reqed('www-authenticate', host, req, headers) + self.reset_retry_count() + return retry + except: + return self.http_error_default(req, fp, code, msg, headers) + +def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers): + """URL, filename, or string --> stream + + This function lets you define parsers that take any input source + (URL, pathname to local or network file, or actual data as a string) + and deal with it in a uniform manner. Returned object is guaranteed + to have all the basic stdio read methods (read, readline, readlines). + Just .close() the object when you're done with it. + + If the etag argument is supplied, it will be used as the value of an + If-None-Match request header. + + If the modified argument is supplied, it must be a tuple of 9 integers + as returned by gmtime() in the standard Python time module. This MUST + be in GMT (Greenwich Mean Time). The formatted date/time will be used + as the value of an If-Modified-Since request header. + + If the agent argument is supplied, it will be used as the value of a + User-Agent request header. + + If the referrer argument is supplied, it will be used as the value of a + Referer[sic] request header. + + If handlers is supplied, it is a list of handlers used to build a + urllib2 opener. + """ + + if hasattr(url_file_stream_or_string, 'read'): + return url_file_stream_or_string + + if url_file_stream_or_string == '-': + return sys.stdin + + if urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp'): + if not agent: + agent = USER_AGENT + # test for inline user:password for basic auth + auth = None + if base64: + urltype, rest = urllib.splittype(url_file_stream_or_string) + realhost, rest = urllib.splithost(rest) + if realhost: + user_passwd, realhost = urllib.splituser(realhost) + if user_passwd: + url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest) + auth = base64.encodestring(user_passwd).strip() + # try to open with urllib2 (to use optional headers) + request = urllib2.Request(url_file_stream_or_string) + request.add_header('User-Agent', agent) + if etag: + request.add_header('If-None-Match', etag) + if modified: + # format into an RFC 1123-compliant timestamp. We can't use + # time.strftime() since the %a and %b directives can be affected + # by the current locale, but RFC 2616 states that dates must be + # in English. 
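# The same locale-independent RFC 1123 construction as the table-driven code
# just below, runnable on its own (Python 3 sketch, not part of the patch):
import time

def http_date(tm):  # tm is a 9-tuple in GMT, e.g. from time.gmtime()
    weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
    months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
              'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    return '%s, %02d %s %04d %02d:%02d:%02d GMT' % (
        weekdays[tm[6]], tm[2], months[tm[1] - 1], tm[0], tm[3], tm[4], tm[5])

print(http_date(time.gmtime(0)))  # -> Thu, 01 Jan 1970 00:00:00 GMT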
+ short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] + months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] + request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5])) + if referrer: + request.add_header('Referer', referrer) + if gzip and zlib: + request.add_header('Accept-encoding', 'gzip, deflate') + elif gzip: + request.add_header('Accept-encoding', 'gzip') + elif zlib: + request.add_header('Accept-encoding', 'deflate') + else: + request.add_header('Accept-encoding', '') + if auth: + request.add_header('Authorization', 'Basic %s' % auth) + if ACCEPT_HEADER: + request.add_header('Accept', ACCEPT_HEADER) + request.add_header('A-IM', 'feed') # RFC 3229 support + opener = apply(urllib2.build_opener, tuple([_FeedURLHandler()] + handlers)) + opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent + try: + return opener.open(request) + finally: + opener.close() # JohnD + + # try to open with native open function (if url_file_stream_or_string is a filename) + try: + return open(url_file_stream_or_string) + except: + pass + + # treat url_file_stream_or_string as string + return _StringIO(str(url_file_stream_or_string)) + +_date_handlers = [] +def registerDateHandler(func): + '''Register a date handler function (takes string, returns 9-tuple date in GMT)''' + _date_handlers.insert(0, func) + +# ISO-8601 date parsing routines written by Fazal Majid. +# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601 +# parser is beyond the scope of feedparser and would be a worthwhile addition +# to the Python library. +# A single regular expression cannot parse ISO 8601 date formats into groups +# as the standard is highly irregular (for instance is 030104 2003-01-04 or +# 0301-04-01), so we use templates instead. +# Please note the order in templates is significant because we need a +# greedy match. +_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-MM', 'YYYY-?OOO', + 'YY-?MM-?DD', 'YY-?OOO', 'YYYY', + '-YY-?MM', '-OOO', '-YY', + '--MM-?DD', '--MM', + '---DD', + 'CC', ''] +_iso8601_re = [ + tmpl.replace( + 'YYYY', r'(?P\d{4})').replace( + 'YY', r'(?P\d\d)').replace( + 'MM', r'(?P[01]\d)').replace( + 'DD', r'(?P[0123]\d)').replace( + 'OOO', r'(?P[0123]\d\d)').replace( + 'CC', r'(?P\d\d$)') + + r'(T?(?P\d{2}):(?P\d{2})' + + r'(:(?P\d{2}))?' + + r'(?P[+-](?P\d{2})(:(?P\d{2}))?|Z)?)?' + for tmpl in _iso8601_tmpl] +del tmpl +_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re] +del regex +def _parse_date_iso8601(dateString): + '''Parse a variety of ISO-8601-compatible formats like 20040105''' + m = None + for _iso8601_match in _iso8601_matches: + m = _iso8601_match(dateString) + if m: break + if not m: return + if m.span() == (0, 0): return + params = m.groupdict() + ordinal = params.get('ordinal', 0) + if ordinal: + ordinal = int(ordinal) + else: + ordinal = 0 + year = params.get('year', '--') + if not year or year == '--': + year = time.gmtime()[0] + elif len(year) == 2: + # ISO 8601 assumes current century, i.e. 
93 -> 2093, NOT 1993 + year = 100 * int(time.gmtime()[0] / 100) + int(year) + else: + year = int(year) + month = params.get('month', '-') + if not month or month == '-': + # ordinals are NOT normalized by mktime, we simulate them + # by setting month=1, day=ordinal + if ordinal: + month = 1 + else: + month = time.gmtime()[1] + month = int(month) + day = params.get('day', 0) + if not day: + # see above + if ordinal: + day = ordinal + elif params.get('century', 0) or \ + params.get('year', 0) or params.get('month', 0): + day = 1 + else: + day = time.gmtime()[2] + else: + day = int(day) + # special case of the century - is the first year of the 21st century + # 2000 or 2001 ? The debate goes on... + if 'century' in params.keys(): + year = (int(params['century']) - 1) * 100 + 1 + # in ISO 8601 most fields are optional + for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']: + if not params.get(field, None): + params[field] = 0 + hour = int(params.get('hour', 0)) + minute = int(params.get('minute', 0)) + second = int(params.get('second', 0)) + # weekday is normalized by mktime(), we can ignore it + weekday = 0 + # daylight savings is complex, but not needed for feedparser's purposes + # as time zones, if specified, include mention of whether it is active + # (e.g. PST vs. PDT, CET). Using -1 is implementation-dependent and + # and most implementations have DST bugs + daylight_savings_flag = 0 + tm = [year, month, day, hour, minute, second, weekday, + ordinal, daylight_savings_flag] + # ISO 8601 time zone adjustments + tz = params.get('tz') + if tz and tz != 'Z': + if tz[0] == '-': + tm[3] += int(params.get('tzhour', 0)) + tm[4] += int(params.get('tzmin', 0)) + elif tz[0] == '+': + tm[3] -= int(params.get('tzhour', 0)) + tm[4] -= int(params.get('tzmin', 0)) + else: + return None + # Python's time.mktime() is a wrapper around the ANSI C mktime(3c) + # which is guaranteed to normalize d/m/y/h/m/s. + # Many implementations have bugs, but we'll pretend they don't. + return time.localtime(time.mktime(tm)) +registerDateHandler(_parse_date_iso8601) + +# 8-bit date handling routines written by ytrewq1. 
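# Every parser in the block that follows honors one contract: take a string,
# return a 9-tuple in GMT or None. Anything meeting that contract can be wired
# in through registerDateHandler() above, which prepends to _date_handlers so
# custom formats are tried before the built-ins. A hypothetical handler as a
# sketch (not part of the original patch):
import time

def _parse_date_unix_epoch(dateString):
    '''Parse a bare Unix timestamp such as "1073304600"'''
    if not dateString.isdigit():
        return None
    return time.gmtime(int(dateString))

# registerDateHandler(_parse_date_unix_epoch)
print(_parse_date_unix_epoch('1073304600')[:6])  # -> (2004, 1, 5, 12, 10, 0)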
+_korean_year = u'\ub144' # b3e2 in euc-kr +_korean_month = u'\uc6d4' # bff9 in euc-kr +_korean_day = u'\uc77c' # c0cf in euc-kr +_korean_am = u'\uc624\uc804' # bfc0 c0fc in euc-kr +_korean_pm = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr + +_korean_onblog_date_re = \ + re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \ + (_korean_year, _korean_month, _korean_day)) +_korean_nate_date_re = \ + re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \ + (_korean_am, _korean_pm)) +def _parse_date_onblog(dateString): + '''Parse a string according to the OnBlog 8-bit date format''' + m = _korean_onblog_date_re.match(dateString) + if not m: return + w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \ + {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\ + 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\ + 'zonediff': '+09:00'} + if _debug: sys.stderr.write('OnBlog date parsed as: %s\n' % w3dtfdate) + return _parse_date_w3dtf(w3dtfdate) +registerDateHandler(_parse_date_onblog) + +def _parse_date_nate(dateString): + '''Parse a string according to the Nate 8-bit date format''' + m = _korean_nate_date_re.match(dateString) + if not m: return + hour = int(m.group(5)) + ampm = m.group(4) + if (ampm == _korean_pm): + hour += 12 + hour = str(hour) + if len(hour) == 1: + hour = '0' + hour + w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \ + {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\ + 'hour': hour, 'minute': m.group(6), 'second': m.group(7),\ + 'zonediff': '+09:00'} + if _debug: sys.stderr.write('Nate date parsed as: %s\n' % w3dtfdate) + return _parse_date_w3dtf(w3dtfdate) +registerDateHandler(_parse_date_nate) + +_mssql_date_re = \ + re.compile('(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})(\.\d+)?') +def _parse_date_mssql(dateString): + '''Parse a string according to the MS SQL date format''' + m = _mssql_date_re.match(dateString) + if not m: return + w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \ + {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\ + 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\ + 'zonediff': '+09:00'} + if _debug: sys.stderr.write('MS SQL date parsed as: %s\n' % w3dtfdate) + return _parse_date_w3dtf(w3dtfdate) +registerDateHandler(_parse_date_mssql) + +# Unicode strings for Greek date strings +_greek_months = \ + { \ + u'\u0399\u03b1\u03bd': u'Jan', # c9e1ed in iso-8859-7 + u'\u03a6\u03b5\u03b2': u'Feb', # d6e5e2 in iso-8859-7 + u'\u039c\u03ac\u03ce': u'Mar', # ccdcfe in iso-8859-7 + u'\u039c\u03b1\u03ce': u'Mar', # cce1fe in iso-8859-7 + u'\u0391\u03c0\u03c1': u'Apr', # c1f0f1 in iso-8859-7 + u'\u039c\u03ac\u03b9': u'May', # ccdce9 in iso-8859-7 + u'\u039c\u03b1\u03ca': u'May', # cce1fa in iso-8859-7 + u'\u039c\u03b1\u03b9': u'May', # cce1e9 in iso-8859-7 + u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7 + u'\u0399\u03bf\u03bd': u'Jun', # c9efed in iso-8859-7 + u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7 + u'\u0399\u03bf\u03bb': u'Jul', # c9f9eb in iso-8859-7 + u'\u0391\u03cd\u03b3': u'Aug', # c1fde3 in iso-8859-7 + u'\u0391\u03c5\u03b3': u'Aug', # c1f5e3 in iso-8859-7 + u'\u03a3\u03b5\u03c0': u'Sep', # d3e5f0 in iso-8859-7 + u'\u039f\u03ba\u03c4': u'Oct', # cfeaf4 in iso-8859-7 + u'\u039d\u03bf\u03ad': u'Nov', # cdefdd in iso-8859-7 + u'\u039d\u03bf\u03b5': u'Nov', # cdefe5 in iso-8859-7 + u'\u0394\u03b5\u03ba': 
u'Dec', # c4e5ea in iso-8859-7 + } + +_greek_wdays = \ + { \ + u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7 + u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7 + u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7 + u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7 + u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7 + u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7 + u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7 + } + +_greek_date_format_re = \ + re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)') + +def _parse_date_greek(dateString): + '''Parse a string according to a Greek 8-bit date format.''' + m = _greek_date_format_re.match(dateString) + if not m: return + try: + wday = _greek_wdays[m.group(1)] + month = _greek_months[m.group(3)] + except: + return + rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \ + {'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\ + 'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\ + 'zonediff': m.group(8)} + if _debug: sys.stderr.write('Greek date parsed as: %s\n' % rfc822date) + return _parse_date_rfc822(rfc822date) +registerDateHandler(_parse_date_greek) + +# Unicode strings for Hungarian date strings +_hungarian_months = \ + { \ + u'janu\u00e1r': u'01', # e1 in iso-8859-2 + u'febru\u00e1ri': u'02', # e1 in iso-8859-2 + u'm\u00e1rcius': u'03', # e1 in iso-8859-2 + u'\u00e1prilis': u'04', # e1 in iso-8859-2 + u'm\u00e1ujus': u'05', # e1 in iso-8859-2 + u'j\u00fanius': u'06', # fa in iso-8859-2 + u'j\u00falius': u'07', # fa in iso-8859-2 + u'augusztus': u'08', + u'szeptember': u'09', + u'okt\u00f3ber': u'10', # f3 in iso-8859-2 + u'november': u'11', + u'december': u'12', + } + +_hungarian_date_format_re = \ + re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))') + +def _parse_date_hungarian(dateString): + '''Parse a string according to a Hungarian 8-bit date format.''' + m = _hungarian_date_format_re.match(dateString) + if not m: return + try: + month = _hungarian_months[m.group(2)] + day = m.group(3) + if len(day) == 1: + day = '0' + day + hour = m.group(4) + if len(hour) == 1: + hour = '0' + hour + except: + return + w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \ + {'year': m.group(1), 'month': month, 'day': day,\ + 'hour': hour, 'minute': m.group(5),\ + 'zonediff': m.group(6)} + if _debug: sys.stderr.write('Hungarian date parsed as: %s\n' % w3dtfdate) + return _parse_date_w3dtf(w3dtfdate) +registerDateHandler(_parse_date_hungarian) + +# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by +# Drake and licensed under the Python license. 
Removed all range checking +# for month, day, hour, minute, and second, since mktime will normalize +# these later +def _parse_date_w3dtf(dateString): + def __extract_date(m): + year = int(m.group('year')) + if year < 100: + year = 100 * int(time.gmtime()[0] / 100) + int(year) + if year < 1000: + return 0, 0, 0 + julian = m.group('julian') + if julian: + julian = int(julian) + month = julian / 30 + 1 + day = julian % 30 + 1 + jday = None + while jday != julian: + t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0)) + jday = time.gmtime(t)[-2] + diff = abs(jday - julian) + if jday > julian: + if diff < day: + day = day - diff + else: + month = month - 1 + day = 31 + elif jday < julian: + if day + diff < 28: + day = day + diff + else: + month = month + 1 + return year, month, day + month = m.group('month') + day = 1 + if month is None: + month = 1 + else: + month = int(month) + day = m.group('day') + if day: + day = int(day) + else: + day = 1 + return year, month, day + + def __extract_time(m): + if not m: + return 0, 0, 0 + hours = m.group('hours') + if not hours: + return 0, 0, 0 + hours = int(hours) + minutes = int(m.group('minutes')) + seconds = m.group('seconds') + if seconds: + seconds = int(seconds) + else: + seconds = 0 + return hours, minutes, seconds + + def __extract_tzd(m): + '''Return the Time Zone Designator as an offset in seconds from UTC.''' + if not m: + return 0 + tzd = m.group('tzd') + if not tzd: + return 0 + if tzd == 'Z': + return 0 + hours = int(m.group('tzdhours')) + minutes = m.group('tzdminutes') + if minutes: + minutes = int(minutes) + else: + minutes = 0 + offset = (hours*60 + minutes) * 60 + if tzd[0] == '+': + return -offset + return offset + + __date_re = ('(?P\d\d\d\d)' + '(?:(?P-|)' + '(?:(?P\d\d\d)' + '|(?P\d\d)(?:(?P=dsep)(?P\d\d))?))?') + __tzd_re = '(?P[-+](?P\d\d)(?::?(?P\d\d))|Z)' + __tzd_rx = re.compile(__tzd_re) + __time_re = ('(?P\d\d)(?P:|)(?P\d\d)' + '(?:(?P=tsep)(?P\d\d(?:[.,]\d+)?))?' + + __tzd_re) + __datetime_re = '%s(?:T%s)?' % (__date_re, __time_re) + __datetime_rx = re.compile(__datetime_re) + m = __datetime_rx.match(dateString) + if (m is None) or (m.group() != dateString): return + gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0) + if gmt[0] == 0: return + return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone) +registerDateHandler(_parse_date_w3dtf) + +def _parse_date_rfc822(dateString): + '''Parse an RFC822, RFC1123, RFC2822, or asctime-style date''' + data = dateString.split() + if data[0][-1] in (',', '.') or data[0].lower() in rfc822._daynames: + del data[0] + if len(data) == 4: + s = data[3] + i = s.find('+') + if i > 0: + data[3:] = [s[:i], s[i+1:]] + else: + data.append('') + dateString = " ".join(data) + if len(data) < 5: + dateString += ' 00:00:00 GMT' + tm = rfc822.parsedate_tz(dateString) + if tm: + return time.gmtime(rfc822.mktime_tz(tm)) +# rfc822.py defines several time zones, but we define some extra ones. +# 'ET' is equivalent to 'EST', etc. 
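# An editor's note, not part of the original patch: the rfc822 module used
# above was folded into email.utils in Python 3, where the same
# parse-and-normalize-to-GMT round trip looks like this; the timezone table
# that follows extends rfc822's built-in defaults.
import time
from email.utils import parsedate_tz, mktime_tz

tm = parsedate_tz('Mon, 05 Jan 2004 12:30:00 -0500')  # 10-tuple incl. tz offset
print(time.gmtime(mktime_tz(tm))[:6])                 # -> (2004, 1, 5, 17, 30, 0)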
+_additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800} +rfc822._timezones.update(_additional_timezones) +registerDateHandler(_parse_date_rfc822) + +def _parse_date(dateString): + '''Parses a variety of date formats into a 9-tuple in GMT''' + for handler in _date_handlers: + try: + date9tuple = handler(dateString) + if not date9tuple: continue + if len(date9tuple) != 9: + if _debug: sys.stderr.write('date handler function must return 9-tuple\n') + raise ValueError + map(int, date9tuple) + return date9tuple + except Exception, e: + if _debug: sys.stderr.write('%s raised %s\n' % (handler.__name__, repr(e))) + pass + return None + +def _getCharacterEncoding(http_headers, xml_data): + '''Get the character encoding of the XML document + + http_headers is a dictionary + xml_data is a raw string (not Unicode) + + This is so much trickier than it sounds, it's not even funny. + According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type + is application/xml, application/*+xml, + application/xml-external-parsed-entity, or application/xml-dtd, + the encoding given in the charset parameter of the HTTP Content-Type + takes precedence over the encoding given in the XML prefix within the + document, and defaults to 'utf-8' if neither are specified. But, if + the HTTP Content-Type is text/xml, text/*+xml, or + text/xml-external-parsed-entity, the encoding given in the XML prefix + within the document is ALWAYS IGNORED and only the encoding given in + the charset parameter of the HTTP Content-Type header should be + respected, and it defaults to 'us-ascii' if not specified. + + Furthermore, discussion on the atom-syntax mailing list with the + author of RFC 3023 leads me to the conclusion that any document + served with a Content-Type of text/* and no charset parameter + must be treated as us-ascii. (We now do this.) And also that it + must always be flagged as non-well-formed. (We now do this too.) + + If Content-Type is unspecified (input was local file or non-HTTP source) + or unrecognized (server just got it totally wrong), then go by the + encoding given in the XML prefix of the document and default to + 'iso-8859-1' as per the HTTP specification (RFC 2616). + + Then, assuming we didn't find a character encoding in the HTTP headers + (and the HTTP Content-type allowed us to look in the body), we need + to sniff the first few bytes of the XML data and try to determine + whether the encoding is ASCII-compatible. Section F of the XML + specification shows the way here: + http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info + + If the sniffed encoding is not ASCII-compatible, we need to make it + ASCII compatible so that we can sniff further into the XML declaration + to find the encoding attribute, which will tell us the true encoding. + + Of course, none of this guarantees that we will be able to parse the + feed in the declared character encoding (assuming it was declared + correctly, which many are not). CJKCodecs and iconv_codec help a lot; + you should definitely install them if you can. 
+ http://cjkpython.i18n.org/ + ''' + + def _parseHTTPContentType(content_type): + '''takes HTTP Content-Type header and returns (content type, charset) + + If no charset is specified, returns (content type, '') + If no content type is specified, returns ('', '') + Both return parameters are guaranteed to be lowercase strings + ''' + content_type = content_type or '' + content_type, params = cgi.parse_header(content_type) + return content_type, params.get('charset', '').replace("'", '') + + sniffed_xml_encoding = '' + xml_encoding = '' + true_encoding = '' + http_content_type, http_encoding = _parseHTTPContentType(http_headers.get('content-type')) + # Must sniff for non-ASCII-compatible character encodings before + # searching for XML declaration. This heuristic is defined in + # section F of the XML specification: + # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info + try: + if xml_data[:4] == '\x4c\x6f\xa7\x94': + # EBCDIC + xml_data = _ebcdic_to_ascii(xml_data) + elif xml_data[:4] == '\x00\x3c\x00\x3f': + # UTF-16BE + sniffed_xml_encoding = 'utf-16be' + xml_data = unicode(xml_data, 'utf-16be').encode('utf-8') + elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') and (xml_data[2:4] != '\x00\x00'): + # UTF-16BE with BOM + sniffed_xml_encoding = 'utf-16be' + xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8') + elif xml_data[:4] == '\x3c\x00\x3f\x00': + # UTF-16LE + sniffed_xml_encoding = 'utf-16le' + xml_data = unicode(xml_data, 'utf-16le').encode('utf-8') + elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and (xml_data[2:4] != '\x00\x00'): + # UTF-16LE with BOM + sniffed_xml_encoding = 'utf-16le' + xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8') + elif xml_data[:4] == '\x00\x00\x00\x3c': + # UTF-32BE + sniffed_xml_encoding = 'utf-32be' + xml_data = unicode(xml_data, 'utf-32be').encode('utf-8') + elif xml_data[:4] == '\x3c\x00\x00\x00': + # UTF-32LE + sniffed_xml_encoding = 'utf-32le' + xml_data = unicode(xml_data, 'utf-32le').encode('utf-8') + elif xml_data[:4] == '\x00\x00\xfe\xff': + # UTF-32BE with BOM + sniffed_xml_encoding = 'utf-32be' + xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8') + elif xml_data[:4] == '\xff\xfe\x00\x00': + # UTF-32LE with BOM + sniffed_xml_encoding = 'utf-32le' + xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8') + elif xml_data[:3] == '\xef\xbb\xbf': + # UTF-8 with BOM + sniffed_xml_encoding = 'utf-8' + xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8') + else: + # ASCII-compatible + pass + xml_encoding_match = re.compile('^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data) + except: + xml_encoding_match = None + if xml_encoding_match: + xml_encoding = xml_encoding_match.groups()[0].lower() + if sniffed_xml_encoding and (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')): + xml_encoding = sniffed_xml_encoding + acceptable_content_type = 0 + application_content_types = ('application/xml', 'application/xml-dtd', 'application/xml-external-parsed-entity') + text_content_types = ('text/xml', 'text/xml-external-parsed-entity') + if (http_content_type in application_content_types) or \ + (http_content_type.startswith('application/') and http_content_type.endswith('+xml')): + acceptable_content_type = 1 + true_encoding = http_encoding or xml_encoding or 'utf-8' + elif (http_content_type in text_content_types) or \ + (http_content_type.startswith('text/')) and 
http_content_type.endswith('+xml'): + acceptable_content_type = 1 + true_encoding = http_encoding or 'us-ascii' + elif http_content_type.startswith('text/'): + true_encoding = http_encoding or 'us-ascii' + elif http_headers and (not http_headers.has_key('content-type')): + true_encoding = xml_encoding or 'iso-8859-1' + else: + true_encoding = xml_encoding or 'utf-8' + return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type + +def _toUTF8(data, encoding): + '''Changes an XML data stream on the fly to specify a new encoding + + data is a raw sequence of bytes (not Unicode) that is presumed to be in %encoding already + encoding is a string recognized by encodings.aliases + ''' + if _debug: sys.stderr.write('entering _toUTF8, trying encoding %s\n' % encoding) + # strip Byte Order Mark (if present) + if (len(data) >= 4) and (data[:2] == '\xfe\xff') and (data[2:4] != '\x00\x00'): + if _debug: + sys.stderr.write('stripping BOM\n') + if encoding != 'utf-16be': + sys.stderr.write('trying utf-16be instead\n') + encoding = 'utf-16be' + data = data[2:] + elif (len(data) >= 4) and (data[:2] == '\xff\xfe') and (data[2:4] != '\x00\x00'): + if _debug: + sys.stderr.write('stripping BOM\n') + if encoding != 'utf-16le': + sys.stderr.write('trying utf-16le instead\n') + encoding = 'utf-16le' + data = data[2:] + elif data[:3] == '\xef\xbb\xbf': + if _debug: + sys.stderr.write('stripping BOM\n') + if encoding != 'utf-8': + sys.stderr.write('trying utf-8 instead\n') + encoding = 'utf-8' + data = data[3:] + elif data[:4] == '\x00\x00\xfe\xff': + if _debug: + sys.stderr.write('stripping BOM\n') + if encoding != 'utf-32be': + sys.stderr.write('trying utf-32be instead\n') + encoding = 'utf-32be' + data = data[4:] + elif data[:4] == '\xff\xfe\x00\x00': + if _debug: + sys.stderr.write('stripping BOM\n') + if encoding != 'utf-32le': + sys.stderr.write('trying utf-32le instead\n') + encoding = 'utf-32le' + data = data[4:] + newdata = unicode(data, encoding) + if _debug: sys.stderr.write('successfully converted %s data to unicode\n' % encoding) + declmatch = re.compile('^<\?xml[^>]*?>') + newdecl = '''''' + if declmatch.search(newdata): + newdata = declmatch.sub(newdecl, newdata) + else: + newdata = newdecl + u'\n' + newdata + return newdata.encode('utf-8') + +def _stripDoctype(data): + '''Strips DOCTYPE from XML document, returns (rss_version, stripped_data) + + rss_version may be 'rss091n' or None + stripped_data is the same XML document, minus the DOCTYPE + ''' + entity_pattern = re.compile(r']*?)>', re.MULTILINE) + data = entity_pattern.sub('', data) + doctype_pattern = re.compile(r']*?)>', re.MULTILINE) + doctype_results = doctype_pattern.findall(data) + doctype = doctype_results and doctype_results[0] or '' + if doctype.lower().count('netscape'): + version = 'rss091n' + else: + version = None + data = doctype_pattern.sub('', data) + return version, data + +def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=[]): + '''Parse a feed from a URL, file, stream, or string''' + result = FeedParserDict() + result['feed'] = FeedParserDict() + result['entries'] = [] + if _XML_AVAILABLE: + result['bozo'] = 0 + if type(handlers) == types.InstanceType: + handlers = [handlers] + try: + f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers) + data = f.read() + except Exception, e: + result['bozo'] = 1 + result['bozo_exception'] = e + data = '' + f = None + + # if feed is gzip-compressed, decompress it + if f 
and data and hasattr(f, 'headers'): + if gzip and f.headers.get('content-encoding', '') == 'gzip': + try: + data = gzip.GzipFile(fileobj=_StringIO(data)).read() + except Exception, e: + # Some feeds claim to be gzipped but they're not, so + # we get garbage. Ideally, we should re-request the + # feed without the 'Accept-encoding: gzip' header, + # but we don't. + result['bozo'] = 1 + result['bozo_exception'] = e + data = '' + elif zlib and f.headers.get('content-encoding', '') == 'deflate': + try: + data = zlib.decompress(data, -zlib.MAX_WBITS) + except Exception, e: + result['bozo'] = 1 + result['bozo_exception'] = e + data = '' + + # save HTTP headers + if hasattr(f, 'info'): + info = f.info() + result['etag'] = info.getheader('ETag') + last_modified = info.getheader('Last-Modified') + if last_modified: + result['modified'] = _parse_date(last_modified) + if hasattr(f, 'url'): + result['href'] = f.url + result['status'] = 200 + if hasattr(f, 'status'): + result['status'] = f.status + if hasattr(f, 'headers'): + result['headers'] = f.headers.dict + if hasattr(f, 'close'): + f.close() + + # there are four encodings to keep track of: + # - http_encoding is the encoding declared in the Content-Type HTTP header + # - xml_encoding is the encoding declared in the ; changed +# project name +#2.5 - 7/25/2003 - MAP - changed to Python license (all contributors agree); +# removed unnecessary urllib code -- urllib2 should always be available anyway; +# return actual url, status, and full HTTP headers (as result['url'], +# result['status'], and result['headers']) if parsing a remote feed over HTTP -- +# this should pass all the HTTP tests at ; +# added the latest namespace-of-the-week for RSS 2.0 +#2.5.1 - 7/26/2003 - RMK - clear opener.addheaders so we only send our custom +# User-Agent (otherwise urllib2 sends two, which confuses some servers) +#2.5.2 - 7/28/2003 - MAP - entity-decode inline xml properly; added support for +# inline and as used in some RSS 2.0 feeds +#2.5.3 - 8/6/2003 - TvdV - patch to track whether we're inside an image or +# textInput, and also to return the character encoding (if specified) +#2.6 - 1/1/2004 - MAP - dc:author support (MarekK); fixed bug tracking +# nested divs within content (JohnD); fixed missing sys import (JohanS); +# fixed regular expression to capture XML character encoding (Andrei); +# added support for Atom 0.3-style links; fixed bug with textInput tracking; +# added support for cloud (MartijnP); added support for multiple +# category/dc:subject (MartijnP); normalize content model: 'description' gets +# description (which can come from description, summary, or full content if no +# description), 'content' gets dict of base/language/type/value (which can come +# from content:encoded, xhtml:body, content, or fullitem); +# fixed bug matching arbitrary Userland namespaces; added xml:base and xml:lang +# tracking; fixed bug tracking unknown tags; fixed bug tracking content when +# element is not in default namespace (like Pocketsoap feed); +# resolve relative URLs in link, guid, docs, url, comments, wfw:comment, +# wfw:commentRSS; resolve relative URLs within embedded HTML markup in +# description, xhtml:body, content, content:encoded, title, subtitle, +# summary, info, tagline, and copyright; added support for pingback and +# trackback namespaces +#2.7 - 1/5/2004 - MAP - really added support for trackback and pingback +# namespaces, as opposed to 2.6 when I said I did but didn't really; +# sanitize HTML markup within some elements; added mxTidy support (if +# 
installed) to tidy HTML markup within some elements; fixed indentation +# bug in _parse_date (FazalM); use socket.setdefaulttimeout if available +# (FazalM); universal date parsing and normalization (FazalM): 'created', modified', +# 'issued' are parsed into 9-tuple date format and stored in 'created_parsed', +# 'modified_parsed', and 'issued_parsed'; 'date' is duplicated in 'modified' +# and vice-versa; 'date_parsed' is duplicated in 'modified_parsed' and vice-versa +#2.7.1 - 1/9/2004 - MAP - fixed bug handling " and '. fixed memory +# leak not closing url opener (JohnD); added dc:publisher support (MarekK); +# added admin:errorReportsTo support (MarekK); Python 2.1 dict support (MarekK) +#2.7.4 - 1/14/2004 - MAP - added workaround for improperly formed
<br> tags in
+# encoded HTML (skadz); fixed unicode handling in normalize_attrs (ChrisL);
+# fixed relative URI processing for guid (skadz); added ICBM support; added
+# base64 support
+#2.7.5 - 1/15/2004 - MAP - added workaround for malformed DOCTYPE (seen on many
+# blogspot.com sites); added _debug variable
+#2.7.6 - 1/16/2004 - MAP - fixed bug with StringIO importing
+#3.0b3 - 1/23/2004 - MAP - parse entire feed with real XML parser (if available);
+# added several new supported namespaces; fixed bug tracking naked markup in
+# description; added support for enclosure; added support for source; re-added
+# support for cloud which got dropped somehow; added support for expirationDate
+#3.0b4 - 1/26/2004 - MAP - fixed xml:lang inheritance; fixed multiple bugs tracking
+# xml:base URI, one for documents that don't define one explicitly and one for
+# documents that define an outer and an inner xml:base that goes out of scope
+# before the end of the document
+#3.0b5 - 1/26/2004 - MAP - fixed bug parsing multiple links at feed level
+#3.0b6 - 1/27/2004 - MAP - added feed type and version detection, result['version']
+# will be one of SUPPORTED_VERSIONS.keys() or empty string if unrecognized;
+# added support for creativeCommons:license and cc:license; added support for
+# full Atom content model in title, tagline, info, copyright, summary; fixed bug
+# with gzip encoding (not always telling server we support it when we do)
+#3.0b7 - 1/28/2004 - MAP - support Atom-style author element in author_detail
+# (dictionary of 'name', 'url', 'email'); map author to author_detail if author
+# contains name + email address
+#3.0b8 - 1/28/2004 - MAP - added support for contributor
+#3.0b9 - 1/29/2004 - MAP - fixed check for presence of dict function; added
+# support for summary
+#3.0b10 - 1/31/2004 - MAP - incorporated ISO-8601 date parsing routines from
+# xml.util.iso8601
+#3.0b11 - 2/2/2004 - MAP - added 'rights' to list of elements that can contain
+# dangerous markup; fiddled with decodeEntities (not right); liberalized
+# date parsing even further
+#3.0b12 - 2/6/2004 - MAP - fiddled with decodeEntities (still not right);
+# added support to Atom 0.2 subtitle; added support for Atom content model
+# in copyright; better sanitizing of dangerous HTML elements with end tags
+# (script, frameset)
+#3.0b13 - 2/8/2004 - MAP - better handling of empty HTML tags (br, hr, img,
+# etc.) in embedded markup, in either HTML or XHTML form (
<br>, <br/>, <br />
    ) +#3.0b14 - 2/8/2004 - MAP - fixed CDATA handling in non-wellformed feeds under +# Python 2.1 +#3.0b15 - 2/11/2004 - MAP - fixed bug resolving relative links in wfw:commentRSS; +# fixed bug capturing author and contributor URL; fixed bug resolving relative +# links in author and contributor URL; fixed bug resolvin relative links in +# generator URL; added support for recognizing RSS 1.0; passed Simon Fell's +# namespace tests, and included them permanently in the test suite with his +# permission; fixed namespace handling under Python 2.1 +#3.0b16 - 2/12/2004 - MAP - fixed support for RSS 0.90 (broken in b15) +#3.0b17 - 2/13/2004 - MAP - determine character encoding as per RFC 3023 +#3.0b18 - 2/17/2004 - MAP - always map description to summary_detail (Andrei); +# use libxml2 (if available) +#3.0b19 - 3/15/2004 - MAP - fixed bug exploding author information when author +# name was in parentheses; removed ultra-problematic mxTidy support; patch to +# workaround crash in PyXML/expat when encountering invalid entities +# (MarkMoraes); support for textinput/textInput +#3.0b20 - 4/7/2004 - MAP - added CDF support +#3.0b21 - 4/14/2004 - MAP - added Hot RSS support +#3.0b22 - 4/19/2004 - MAP - changed 'channel' to 'feed', 'item' to 'entries' in +# results dict; changed results dict to allow getting values with results.key +# as well as results[key]; work around embedded illformed HTML with half +# a DOCTYPE; work around malformed Content-Type header; if character encoding +# is wrong, try several common ones before falling back to regexes (if this +# works, bozo_exception is set to CharacterEncodingOverride); fixed character +# encoding issues in BaseHTMLProcessor by tracking encoding and converting +# from Unicode to raw strings before feeding data to sgmllib.SGMLParser; +# convert each value in results to Unicode (if possible), even if using +# regex-based parsing +#3.0b23 - 4/21/2004 - MAP - fixed UnicodeDecodeError for feeds that contain +# high-bit characters in attributes in embedded HTML in description (thanks +# Thijs van de Vossen); moved guid, date, and date_parsed to mapped keys in +# FeedParserDict; tweaked FeedParserDict.has_key to return True if asking +# about a mapped key +#3.0fc1 - 4/23/2004 - MAP - made results.entries[0].links[0] and +# results.entries[0].enclosures[0] into FeedParserDict; fixed typo that could +# cause the same encoding to be tried twice (even if it failed the first time); +# fixed DOCTYPE stripping when DOCTYPE contained entity declarations; +# better textinput and image tracking in illformed RSS 1.0 feeds +#3.0fc2 - 5/10/2004 - MAP - added and passed Sam's amp tests; added and passed +# my blink tag tests +#3.0fc3 - 6/18/2004 - MAP - fixed bug in _changeEncodingDeclaration that +# failed to parse utf-16 encoded feeds; made source into a FeedParserDict; +# duplicate admin:generatorAgent/@rdf:resource in generator_detail.url; +# added support for image; refactored parse() fallback logic to try other +# encodings if SAX parsing fails (previously it would only try other encodings +# if re-encoding failed); remove unichr madness in normalize_attrs now that +# we're properly tracking encoding in and out of BaseHTMLProcessor; set +# feed.language from root-level xml:lang; set entry.id from rdf:about; +# send Accept header +#3.0 - 6/21/2004 - MAP - don't try iso-8859-1 (can't distinguish between +# iso-8859-1 and windows-1252 anyway, and most incorrectly marked feeds are +# windows-1252); fixed regression that could cause the same encoding to be +# tried twice 
(even if it failed the first time) +#3.0.1 - 6/22/2004 - MAP - default to us-ascii for all text/* content types; +# recover from malformed content-type header parameter with no equals sign +# ('text/xml; charset:iso-8859-1') +#3.1 - 6/28/2004 - MAP - added and passed tests for converting HTML entities +# to Unicode equivalents in illformed feeds (aaronsw); added and +# passed tests for converting character entities to Unicode equivalents +# in illformed feeds (aaronsw); test for valid parsers when setting +# XML_AVAILABLE; make version and encoding available when server returns +# a 304; add handlers parameter to pass arbitrary urllib2 handlers (like +# digest auth or proxy support); add code to parse username/password +# out of url and send as basic authentication; expose downloading-related +# exceptions in bozo_exception (aaronsw); added __contains__ method to +# FeedParserDict (aaronsw); added publisher_detail (aaronsw) +#3.2 - 7/3/2004 - MAP - use cjkcodecs and iconv_codec if available; always +# convert feed to UTF-8 before passing to XML parser; completely revamped +# logic for determining character encoding and attempting XML parsing +# (much faster); increased default timeout to 20 seconds; test for presence +# of Location header on redirects; added tests for many alternate character +# encodings; support various EBCDIC encodings; support UTF-16BE and +# UTF16-LE with or without a BOM; support UTF-8 with a BOM; support +# UTF-32BE and UTF-32LE with or without a BOM; fixed crashing bug if no +# XML parsers are available; added support for 'Content-encoding: deflate'; +# send blank 'Accept-encoding: ' header if neither gzip nor zlib modules +# are available +#3.3 - 7/15/2004 - MAP - optimize EBCDIC to ASCII conversion; fix obscure +# problem tracking xml:base and xml:lang if element declares it, child +# doesn't, first grandchild redeclares it, and second grandchild doesn't; +# refactored date parsing; defined public registerDateHandler so callers +# can add support for additional date formats at runtime; added support +# for OnBlog, Nate, MSSQL, Greek, and Hungarian dates (ytrewq1); added +# zopeCompatibilityHack() which turns FeedParserDict into a regular +# dictionary, required for Zope compatibility, and also makes command- +# line debugging easier because pprint module formats real dictionaries +# better than dictionary-like objects; added NonXMLContentType exception, +# which is stored in bozo_exception when a feed is served with a non-XML +# media type such as 'text/plain'; respect Content-Language as default +# language if not xml:lang is present; cloud dict is now FeedParserDict; +# generator dict is now FeedParserDict; better tracking of xml:lang, +# including support for xml:lang='' to unset the current language; +# recognize RSS 1.0 feeds even when RSS 1.0 namespace is not the default +# namespace; don't overwrite final status on redirects (scenarios: +# redirecting to a URL that returns 304, redirecting to a URL that +# redirects to another URL with a different type of redirect); add +# support for HTTP 303 redirects +#4.0 - MAP - support for relative URIs in xml:base attribute; fixed +# encoding issue with mxTidy (phopkins); preliminary support for RFC 3229; +# support for Atom 1.0; support for iTunes extensions; new 'tags' for +# categories/keywords/etc. 
as array of dict +# {'term': term, 'scheme': scheme, 'label': label} to match Atom 1.0 +# terminology; parse RFC 822-style dates with no time; lots of other +# bug fixes +#4.1 - MAP - removed socket timeout; added support for chardet library diff --git a/DJAGEN/trunk/djagen/gezegen/planet/htmltmpl.py b/DJAGEN/trunk/djagen/gezegen/planet/htmltmpl.py new file mode 100755 index 0000000..be6e41b --- /dev/null +++ b/DJAGEN/trunk/djagen/gezegen/planet/htmltmpl.py @@ -0,0 +1,1480 @@ + +""" A templating engine for separation of code and HTML. + + The documentation of this templating engine is separated to two parts: + + 1. Description of the templating language. + + 2. Documentation of classes and API of this module that provides + a Python implementation of the templating language. + + All the documentation can be found in 'doc' directory of the + distribution tarball or at the homepage of the engine. + Latest versions of this module are also available at that website. + + You can use and redistribute this module under conditions of the + GNU General Public License that can be found either at + [ http://www.gnu.org/ ] or in file "LICENSE" contained in the + distribution tarball of this module. + + Copyright (c) 2001 Tomas Styblo, tripie@cpan.org + + @name htmltmpl + @version 1.22 + @author-name Tomas Styblo + @author-email tripie@cpan.org + @website http://htmltmpl.sourceforge.net/ + @license-name GNU GPL + @license-url http://www.gnu.org/licenses/gpl.html +""" + +__version__ = 1.22 +__author__ = "Tomas Styblo (tripie@cpan.org)" + +# All imported modules are part of the standard Python library. + +from types import * +import re +import os +import os.path +import pprint # only for debugging +import sys +import copy +import cgi # for HTML escaping of variables +import urllib # for URL escaping of variables +import cPickle # for template compilation +import gettext + +INCLUDE_DIR = "inc" + +# Total number of possible parameters. +# Increment if adding a parameter to any statement. +PARAMS_NUMBER = 3 + +# Relative positions of parameters in TemplateCompiler.tokenize(). +PARAM_NAME = 1 +PARAM_ESCAPE = 2 +PARAM_GLOBAL = 3 +PARAM_GETTEXT_STRING = 1 + +# Find a way to lock files. Currently implemented only for UNIX and windows. +LOCKTYPE_FCNTL = 1 +LOCKTYPE_MSVCRT = 2 +LOCKTYPE = None +try: + import fcntl +except: + try: + import msvcrt + except: + LOCKTYPE = None + else: + LOCKTYPE = LOCKTYPE_MSVCRT +else: + LOCKTYPE = LOCKTYPE_FCNTL +LOCK_EX = 1 +LOCK_SH = 2 +LOCK_UN = 3 + +############################################## +# CLASS: TemplateManager # +############################################## + +class TemplateManager: + """ Class that manages compilation and precompilation of templates. + + You should use this class whenever you work with templates + that are stored in a file. The class can create a compiled + template and transparently manage its precompilation. It also + keeps the precompiled templates up-to-date by modification times + comparisons. + """ + + def __init__(self, include=1, max_include=5, precompile=1, comments=1, + gettext=0, debug=0): + """ Constructor. + + @header + __init__(include=1, max_include=5, precompile=1, comments=1, + gettext=0, debug=0) + + @param include Enable or disable included templates. + This optional parameter can be used to enable or disable + TMPL_INCLUDE inclusion of templates. Disabling of + inclusion can improve performance a bit. The inclusion is + enabled by default. + + @param max_include Maximum depth of nested inclusions. 
+ This optional parameter can be used to specify maximum depth of + nested TMPL_INCLUDE inclusions. It defaults to 5. + This setting prevents infinite recursive inclusions. + + @param precompile Enable or disable precompilation of templates. + This optional parameter can be used to enable or disable + creation and usage of precompiled templates. + + A precompiled template is saved to the same directory in + which the main template file is located. You need write + permissions to that directory. + + Precompilation provides a significant performance boost because + it's not necessary to parse the templates over and over again. + The boost is especially noticeable when templates that include + other templates are used. + + Comparison of modification times of the main template and all + included templates is used to ensure that the precompiled + templates are up-to-date. Templates are also recompiled if the + htmltmpl module is updated. + + The TemplateErrorexception is raised when the precompiled + template cannot be saved. Precompilation is enabled by default. + + Precompilation is available only on UNIX and Windows platforms, + because proper file locking which is necessary to ensure + multitask safe behaviour is platform specific and is not + implemented for other platforms. Attempts to enable precompilation + on the other platforms result in raise of the + TemplateError exception. + + @param comments Enable or disable template comments. + This optional parameter can be used to enable or disable + template comments. + Disabling of the comments can improve performance a bit. + Comments are enabled by default. + + @param gettext Enable or disable gettext support. + + @param debug Enable or disable debugging messages. + This optional parameter is a flag that can be used to enable + or disable debugging messages which are printed to the standard + error output. The debugging messages are disabled by default. + """ + # Save the optional parameters. + # These values are not modified by any method. + self._include = include + self._max_include = max_include + self._precompile = precompile + self._comments = comments + self._gettext = gettext + self._debug = debug + + # Find what module to use to lock files. + # File locking is necessary for the 'precompile' feature to be + # multitask/thread safe. Currently it works only on UNIX + # and Windows. Anyone willing to implement it on Mac ? + if precompile and not LOCKTYPE: + raise TemplateError, "Template precompilation is not "\ + "available on this platform." + self.DEB("INIT DONE") + + def prepare(self, file): + """ Preprocess, parse, tokenize and compile the template. + + If precompilation is enabled then this method tries to load + a precompiled form of the template from the same directory + in which the template source file is located. If it succeeds, + then it compares modification times stored in the precompiled + form to modification times of source files of the template, + including source files of all templates included via the + TMPL_INCLUDE statements. If any of the modification times + differs, then the template is recompiled and the precompiled + form updated. + + If precompilation is disabled, then this method parses and + compiles the template. + + @header prepare(file) + + @return Compiled template. + The methods returns an instance of the Template class + which is a compiled form of the template. This instance can be + used as input for the TemplateProcessor. + + @param file Path to the template file to prepare. 
+ The method looks for the template file in current directory + if the parameter is a relative path. All included templates must + be placed in subdirectory 'inc' of the + directory in which the main template file is located. + """ + compiled = None + if self._precompile: + if self.is_precompiled(file): + try: + precompiled = self.load_precompiled(file) + except PrecompiledError, template: + print >> sys.stderr, "Htmltmpl: bad precompiled "\ + "template '%s' removed" % template + compiled = self.compile(file) + self.save_precompiled(compiled) + else: + precompiled.debug(self._debug) + compile_params = (self._include, self._max_include, + self._comments, self._gettext) + if precompiled.is_uptodate(compile_params): + self.DEB("PRECOMPILED: UPTODATE") + compiled = precompiled + else: + self.DEB("PRECOMPILED: NOT UPTODATE") + compiled = self.update(precompiled) + else: + self.DEB("PRECOMPILED: NOT PRECOMPILED") + compiled = self.compile(file) + self.save_precompiled(compiled) + else: + self.DEB("PRECOMPILATION DISABLED") + compiled = self.compile(file) + return compiled + + def update(self, template): + """ Update (recompile) a compiled template. + + This method recompiles a template compiled from a file. + If precompilation is enabled then the precompiled form saved on + disk is also updated. + + @header update(template) + + @return Recompiled template. + It's ensured that the returned template is up-to-date. + + @param template A compiled template. + This parameter should be an instance of the Template + class, created either by the TemplateManager or by the + TemplateCompiler. The instance must represent a template + compiled from a file on disk. + """ + self.DEB("UPDATE") + updated = self.compile(template.file()) + if self._precompile: + self.save_precompiled(updated) + return updated + + ############################################## + # PRIVATE METHODS # + ############################################## + + def DEB(self, str): + """ Print debugging message to stderr if debugging is enabled. + @hidden + """ + if self._debug: print >> sys.stderr, str + + def lock_file(self, file, lock): + """ Provide platform independent file locking. + @hidden + """ + fd = file.fileno() + if LOCKTYPE == LOCKTYPE_FCNTL: + if lock == LOCK_SH: + fcntl.flock(fd, fcntl.LOCK_SH) + elif lock == LOCK_EX: + fcntl.flock(fd, fcntl.LOCK_EX) + elif lock == LOCK_UN: + fcntl.flock(fd, fcntl.LOCK_UN) + else: + raise TemplateError, "BUG: bad lock in lock_file" + elif LOCKTYPE == LOCKTYPE_MSVCRT: + if lock == LOCK_SH: + # msvcrt does not support shared locks :-( + msvcrt.locking(fd, msvcrt.LK_LOCK, 1) + elif lock == LOCK_EX: + msvcrt.locking(fd, msvcrt.LK_LOCK, 1) + elif lock == LOCK_UN: + msvcrt.locking(fd, msvcrt.LK_UNLCK, 1) + else: + raise TemplateError, "BUG: bad lock in lock_file" + else: + raise TemplateError, "BUG: bad locktype in lock_file" + + def compile(self, file): + """ Compile the template. + @hidden + """ + return TemplateCompiler(self._include, self._max_include, + self._comments, self._gettext, + self._debug).compile(file) + + def is_precompiled(self, file): + """ Return true if the template is already precompiled on the disk. + This method doesn't check whether the compiled template is + uptodate. + @hidden + """ + filename = file + "c" # "template.tmplc" + if os.path.isfile(filename): + return 1 + else: + return 0 + + def load_precompiled(self, file): + """ Load precompiled template from disk. + + Remove the precompiled template file and recompile it + if the file contains corrupted or unpicklable data. 
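+
+        By way of illustration, the full precompilation round trip driven
+        by prepare() (a minimal sketch; "page.tmpl" is a hypothetical file
+        and "page.tmplc" is the precompiled pickle this method loads):
+
+            manager = TemplateManager(precompile=1)
+            template = manager.prepare("page.tmpl")  # writes or reuses "page.tmplc"
+            tproc = TemplateProcessor()
+            tproc.set("title", "Hello")                            # ordinary variable
+            tproc.set("Entries", [{"name": "a"}, {"name": "b"}])   # loop: capitalized
+            print tproc.process(template)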
+ + @hidden + """ + filename = file + "c" # "template.tmplc" + self.DEB("LOADING PRECOMPILED") + try: + remove_bad = 0 + file = None + try: + file = open(filename, "rb") + self.lock_file(file, LOCK_SH) + precompiled = cPickle.load(file) + except IOError, (errno, errstr): + raise TemplateError, "IO error in load precompiled "\ + "template '%s': (%d) %s"\ + % (filename, errno, errstr) + except cPickle.UnpicklingError: + remove_bad = 1 + raise PrecompiledError, filename + except: + remove_bad = 1 + raise + else: + return precompiled + finally: + if file: + self.lock_file(file, LOCK_UN) + file.close() + if remove_bad and os.path.isfile(filename): + # X: We may lose the original exception here, raising OSError. + os.remove(filename) + + def save_precompiled(self, template): + """ Save compiled template to disk in precompiled form. + + Associated metadata is also saved. It includes: filename of the + main template file, modification time of the main template file, + modification times of all included templates and version of the + htmltmpl module which compiled the template. + + The method removes a file which is saved only partially because + of some error. + + @hidden + """ + filename = template.file() + "c" # creates "template.tmplc" + # Check if we have write permission to the template's directory. + template_dir = os.path.dirname(os.path.abspath(filename)) + if not os.access(template_dir, os.W_OK): + raise TemplateError, "Cannot save precompiled templates "\ + "to '%s': write permission denied."\ + % template_dir + try: + remove_bad = 0 + file = None + try: + file = open(filename, "wb") # may truncate existing file + self.lock_file(file, LOCK_EX) + BINARY = 1 + READABLE = 0 + if self._debug: + cPickle.dump(template, file, READABLE) + else: + cPickle.dump(template, file, BINARY) + except IOError, (errno, errstr): + remove_bad = 1 + raise TemplateError, "IO error while saving precompiled "\ + "template '%s': (%d) %s"\ + % (filename, errno, errstr) + except cPickle.PicklingError, error: + remove_bad = 1 + raise TemplateError, "Pickling error while saving "\ + "precompiled template '%s': %s"\ + % (filename, error) + except: + remove_bad = 1 + raise + else: + self.DEB("SAVING PRECOMPILED") + finally: + if file: + self.lock_file(file, LOCK_UN) + file.close() + if remove_bad and os.path.isfile(filename): + # X: We may lose the original exception here, raising OSError. + os.remove(filename) + + +############################################## +# CLASS: TemplateProcessor # +############################################## + +class TemplateProcessor: + """ Fill the template with data and process it. + + This class provides actual processing of a compiled template. + Use it to set template variables and loops and then obtain + result of the processing. + """ + + def __init__(self, html_escape=1, magic_vars=1, global_vars=0, debug=0): + """ Constructor. + + @header __init__(html_escape=1, magic_vars=1, global_vars=0, + debug=0) + + @param html_escape Enable or disable HTML escaping of variables. + This optional parameter is a flag that can be used to enable or + disable automatic HTML escaping of variables. + All variables are by default automatically HTML escaped. + The escaping process substitutes HTML brackets, ampersands and + double quotes with appropriate HTML entities. + + @param magic_vars Enable or disable loop magic variables. + This parameter can be used to enable or disable + "magic" context variables, that are automatically defined inside + loops. Magic variables are enabled by default. 
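+
+        For instance, a processor that keeps the default HTML escaping but
+        enables global lookup of variables (a minimal sketch of these flags):
+
+            tproc = TemplateProcessor(html_escape=1, global_vars=1)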
+ + Refer to the language specification for description of these + magic variables. + + @param global_vars Globally activate global lookup of variables. + This optional parameter is a flag that can be used to specify + whether variables which cannot be found in the current scope + should be automatically looked up in enclosing scopes. + + Automatic global lookup is disabled by default. Global lookup + can be overriden on a per-variable basis by the + GLOBAL parameter of a TMPL_VAR + statement. + + @param debug Enable or disable debugging messages. + """ + self._html_escape = html_escape + self._magic_vars = magic_vars + self._global_vars = global_vars + self._debug = debug + + # Data structure containing variables and loops set by the + # application. Use debug=1, process some template and + # then check stderr to see how the structure looks. + # It's modified only by set() and reset() methods. + self._vars = {} + + # Following variables are for multipart templates. + self._current_part = 1 + self._current_pos = 0 + + def set(self, var, value): + """ Associate a value with top-level template variable or loop. + + A template identifier can represent either an ordinary variable + (string) or a loop. + + To assign a value to a string identifier pass a scalar + as the 'value' parameter. This scalar will be automatically + converted to string. + + To assign a value to a loop identifier pass a list of mappings as + the 'value' parameter. The engine iterates over this list and + assigns values from the mappings to variables in a template loop + block if a key in the mapping corresponds to a name of a variable + in the loop block. The number of mappings contained in this list + is equal to number of times the loop block is repeated in the + output. + + @header set(var, value) + @return No return value. + + @param var Name of template variable or loop. + @param value The value to associate. + + """ + # The correctness of character case is verified only for top-level + # variables. + if self.is_ordinary_var(value): + # template top-level ordinary variable + if not var.islower(): + raise TemplateError, "Invalid variable name '%s'." % var + elif type(value) == ListType: + # template top-level loop + if var != var.capitalize(): + raise TemplateError, "Invalid loop name '%s'." % var + else: + raise TemplateError, "Value of toplevel variable '%s' must "\ + "be either a scalar or a list." % var + self._vars[var] = value + self.DEB("VALUE SET: " + str(var)) + + def reset(self, keep_data=0): + """ Reset the template data. + + This method resets the data contained in the template processor + instance. The template processor instance can be used to process + any number of templates, but this method must be called after + a template is processed to reuse the instance, + + @header reset(keep_data=0) + @return No return value. + + @param keep_data Do not reset the template data. + Use this flag if you do not want the template data to be erased. + This way you can reuse the data contained in the instance of + the TemplateProcessor. + """ + self._current_part = 1 + self._current_pos = 0 + if not keep_data: + self._vars.clear() + self.DEB("RESET") + + def process(self, template, part=None): + """ Process a compiled template. Return the result as string. + + This method actually processes a template and returns + the result. + + @header process(template, part=None) + @return Result of the processing as string. + + @param template A compiled template. 
+ Value of this parameter must be an instance of the + Template class created either by the + TemplateManager or by the TemplateCompiler. + + @param part The part of a multipart template to process. + This parameter can be used only together with a multipart + template. It specifies the number of the part to process. + It must be greater than zero, because the parts are numbered + from one. + + The parts must be processed in the right order. You + cannot process a part which precedes an already processed part. + + If this parameter is not specified, then the whole template + is processed, or all remaining parts are processed. + """ + self.DEB("APP INPUT:") + if self._debug: pprint.pprint(self._vars, sys.stderr) + if part != None and (part == 0 or part < self._current_part): + raise TemplateError, "process() - invalid part number" + + # This flag means "jump behind the end of current statement" or + # "skip the parameters of current statement". + # Even parameters that actually are not present in the template + # do appear in the list of tokens as empty items ! + skip_params = 0 + + # Stack for enabling or disabling output in response to TMPL_IF, + # TMPL_UNLESS, TMPL_ELSE and TMPL_LOOPs with no passes. + output_control = [] + ENABLE_OUTPUT = 1 + DISABLE_OUTPUT = 0 + + # Stacks for data related to loops. + loop_name = [] # name of a loop + loop_pass = [] # current pass of a loop (counted from zero) + loop_start = [] # index of loop start in token list + loop_total = [] # total number of passes in a loop + + tokens = template.tokens() + len_tokens = len(tokens) + out = "" # buffer for processed output + + # Recover position at which we ended after processing of last part. + i = self._current_pos + + # Process the list of tokens. + while 1: + if i == len_tokens: break + if skip_params: + # Skip the parameters following a statement. + skip_params = 0 + i += PARAMS_NUMBER + continue + + token = tokens[i] + if token.startswith("." + escape = tokens[i + PARAM_ESCAPE] + globalp = tokens[i + PARAM_GLOBAL] + skip_params = 1 + + # If output of current block is not disabled then append + # the substitued and escaped variable to the output. + if DISABLE_OUTPUT not in output_control: + value = str(self.find_value(var, loop_name, loop_pass, + loop_total, globalp)) + out += self.escape(value, escape) + self.DEB("VAR: " + str(var)) + + elif token == "." + skip_params = 1 + + # Find total number of passes in this loop. + passtotal = self.find_value(var, loop_name, loop_pass, + loop_total) + if not passtotal: passtotal = 0 + # Push data for this loop on the stack. + loop_total.append(passtotal) + loop_start.append(i) + loop_pass.append(0) + loop_name.append(var) + + # Disable output of loop block if the number of passes + # in this loop is zero. + if passtotal == 0: + # This loop is empty. + output_control.append(DISABLE_OUTPUT) + self.DEB("LOOP: DISABLE: " + str(var)) + else: + output_control.append(ENABLE_OUTPUT) + self.DEB("LOOP: FIRST PASS: %s TOTAL: %d"\ + % (var, passtotal)) + + elif token == "." + globalp = tokens[i + PARAM_GLOBAL] + skip_params = 1 + if self.find_value(var, loop_name, loop_pass, + loop_total, globalp): + output_control.append(ENABLE_OUTPUT) + self.DEB("IF: ENABLE: " + str(var)) + else: + output_control.append(DISABLE_OUTPUT) + self.DEB("IF: DISABLE: " + str(var)) + + elif token == "." 
+ globalp = tokens[i + PARAM_GLOBAL] + skip_params = 1 + if self.find_value(var, loop_name, loop_pass, + loop_total, globalp): + output_control.append(DISABLE_OUTPUT) + self.DEB("UNLESS: DISABLE: " + str(var)) + else: + output_control.append(ENABLE_OUTPUT) + self.DEB("UNLESS: ENABLE: " + str(var)) + + elif token == "." + + # If this loop was not disabled, then record the pass. + if loop_total[-1] > 0: loop_pass[-1] += 1 + + if loop_pass[-1] == loop_total[-1]: + # There are no more passes in this loop. Pop + # the loop from stack. + loop_pass.pop() + loop_name.pop() + loop_start.pop() + loop_total.pop() + output_control.pop() + self.DEB("LOOP: END") + else: + # Jump to the beggining of this loop block + # to process next pass of the loop. + i = loop_start[-1] + self.DEB("LOOP: NEXT PASS") + + elif token == "." + output_control.pop() + self.DEB("IF: END") + + elif token == "." + output_control.pop() + self.DEB("UNLESS: END") + + elif token == "." + if output_control[-1] == DISABLE_OUTPUT: + # Condition was false, activate the ELSE block. + output_control[-1] = ENABLE_OUTPUT + self.DEB("ELSE: ENABLE") + elif output_control[-1] == ENABLE_OUTPUT: + # Condition was true, deactivate the ELSE block. + output_control[-1] = DISABLE_OUTPUT + self.DEB("ELSE: DISABLE") + else: + raise TemplateError, "BUG: ELSE: INVALID FLAG" + + elif token == " +
<TMPL_INCLUDE":
+                # A TMPL_INCLUDE token survives compilation only when the
+                # template could not be included; emit a visible warning.
+                skip_params = 1
+                filename = tokens[i + PARAM_NAME]
+                out += """
+                    <br />
+                    <p>
+                    <strong>HTMLTMPL WARNING:</strong><br />
+                    Cannot include template: <strong>%s</strong>
+                    </p>
+                    <br />
    + """ % filename + self.DEB("CANNOT INCLUDE WARNING") + + elif token == "." % token + + elif DISABLE_OUTPUT not in output_control: + # Raw textual template data. + # If output of current block is not disabled, then + # append template data to the output buffer. + out += token + + i += 1 + # end of the big while loop + + # Check whether all opening statements were closed. + if loop_name: raise TemplateError, "Missing ." + if output_control: raise TemplateError, "Missing or " + return out + + ############################################## + # PRIVATE METHODS # + ############################################## + + def DEB(self, str): + """ Print debugging message to stderr if debugging is enabled. + @hidden + """ + if self._debug: print >> sys.stderr, str + + def find_value(self, var, loop_name, loop_pass, loop_total, + global_override=None): + """ Search the self._vars data structure to find variable var + located in currently processed pass of a loop which + is currently being processed. If the variable is an ordinary + variable, then return it. + + If the variable is an identificator of a loop, then + return the total number of times this loop will + be executed. + + Return an empty string, if the variable is not + found at all. + + @hidden + """ + # Search for the requested variable in magic vars if the name + # of the variable starts with "__" and if we are inside a loop. + if self._magic_vars and var.startswith("__") and loop_name: + return self.magic_var(var, loop_pass[-1], loop_total[-1]) + + # Search for an ordinary variable or for a loop. + # Recursively search in self._vars for the requested variable. + scope = self._vars + globals = [] + for i in range(len(loop_name)): + # If global lookup is on then push the value on the stack. + if ((self._global_vars and global_override != "0") or \ + global_override == "1") and scope.has_key(var) and \ + self.is_ordinary_var(scope[var]): + globals.append(scope[var]) + + # Descent deeper into the hierarchy. + if scope.has_key(loop_name[i]) and scope[loop_name[i]]: + scope = scope[loop_name[i]][loop_pass[i]] + else: + return "" + + if scope.has_key(var): + # Value exists in current loop. + if type(scope[var]) == ListType: + # The requested value is a loop. + # Return total number of its passes. + return len(scope[var]) + else: + return scope[var] + elif globals and \ + ((self._global_vars and global_override != "0") or \ + global_override == "1"): + # Return globally looked up value. + return globals.pop() + else: + # No value found. + if var[0].isupper(): + # This is a loop name. + # Return zero, because the user wants to know number + # of its passes. + return 0 + else: + return "" + + def magic_var(self, var, loop_pass, loop_total): + """ Resolve and return value of a magic variable. + Raise an exception if the magic variable is not recognized. + + @hidden + """ + self.DEB("MAGIC: '%s', PASS: %d, TOTAL: %d"\ + % (var, loop_pass, loop_total)) + if var == "__FIRST__": + if loop_pass == 0: + return 1 + else: + return 0 + elif var == "__LAST__": + if loop_pass == loop_total - 1: + return 1 + else: + return 0 + elif var == "__INNER__": + # If this is neither the first nor the last pass. + if loop_pass != 0 and loop_pass != loop_total - 1: + return 1 + else: + return 0 + elif var == "__PASS__": + # Magic variable __PASS__ counts passes from one. + return loop_pass + 1 + elif var == "__PASSTOTAL__": + return loop_total + elif var == "__ODD__": + # Internally pass numbers stored in loop_pass are counted from + # zero. 
But the template language presents them counted from one. + # Therefore we must add one to the actual loop_pass value to get + # the value we present to the user. + if (loop_pass + 1) % 2 != 0: + return 1 + else: + return 0 + elif var.startswith("__EVERY__"): + # Magic variable __EVERY__x is never true in first or last pass. + if loop_pass != 0 and loop_pass != loop_total - 1: + # Check if an integer follows the variable name. + try: + every = int(var[9:]) # nine is length of "__EVERY__" + except ValueError: + raise TemplateError, "Magic variable __EVERY__x: "\ + "Invalid pass number." + else: + if not every: + raise TemplateError, "Magic variable __EVERY__x: "\ + "Pass number cannot be zero." + elif (loop_pass + 1) % every == 0: + self.DEB("MAGIC: EVERY: " + str(every)) + return 1 + else: + return 0 + else: + return 0 + else: + raise TemplateError, "Invalid magic variable '%s'." % var + + def escape(self, str, override=""): + """ Escape a string either by HTML escaping or by URL escaping. + @hidden + """ + ESCAPE_QUOTES = 1 + if (self._html_escape and override != "NONE" and override != "0" and \ + override != "URL") or override == "HTML" or override == "1": + return cgi.escape(str, ESCAPE_QUOTES) + elif override == "URL": + return urllib.quote_plus(str) + else: + return str + + def is_ordinary_var(self, var): + """ Return true if var is a scalar. (not a reference to loop) + @hidden + """ + if type(var) == StringType or type(var) == IntType or \ + type(var) == LongType or type(var) == FloatType: + return 1 + else: + return 0 + + +############################################## +# CLASS: TemplateCompiler # +############################################## + +class TemplateCompiler: + """ Preprocess, parse, tokenize and compile the template. + + This class parses the template and produces a 'compiled' form + of it. This compiled form is an instance of the Template + class. The compiled form is used as input for the TemplateProcessor + which uses it to actually process the template. + + This class should be used direcly only when you need to compile + a template from a string. If your template is in a file, then you + should use the TemplateManager class which provides + a higher level interface to this class and also can save the + compiled template to disk in a precompiled form. + """ + + def __init__(self, include=1, max_include=5, comments=1, gettext=0, + debug=0): + """ Constructor. + + @header __init__(include=1, max_include=5, comments=1, gettext=0, + debug=0) + + @param include Enable or disable included templates. + @param max_include Maximum depth of nested inclusions. + @param comments Enable or disable template comments. + @param gettext Enable or disable gettext support. + @param debug Enable or disable debugging messages. + """ + + self._include = include + self._max_include = max_include + self._comments = comments + self._gettext = gettext + self._debug = debug + + # This is a list of filenames of all included templates. + # It's modified by the include_templates() method. + self._include_files = [] + + # This is a counter of current inclusion depth. It's used to prevent + # infinite recursive includes. + self._include_level = 0 + + def compile(self, file): + """ Compile template from a file. + + @header compile(file) + @return Compiled template. + The return value is an instance of the Template + class. + + @param file Filename of the template. + See the prepare() method of the TemplateManager + class for exaplanation of this parameter. 
+ """ + + self.DEB("COMPILING FROM FILE: " + file) + self._include_path = os.path.join(os.path.dirname(file), INCLUDE_DIR) + tokens = self.parse(self.read(file)) + compile_params = (self._include, self._max_include, self._comments, + self._gettext) + return Template(__version__, file, self._include_files, + tokens, compile_params, self._debug) + + def compile_string(self, data): + """ Compile template from a string. + + This method compiles a template from a string. The + template cannot include any templates. + TMPL_INCLUDE statements are turned into warnings. + + @header compile_string(data) + @return Compiled template. + The return value is an instance of the Template + class. + + @param data String containing the template data. + """ + self.DEB("COMPILING FROM STRING") + self._include = 0 + tokens = self.parse(data) + compile_params = (self._include, self._max_include, self._comments, + self._gettext) + return Template(__version__, None, None, tokens, compile_params, + self._debug) + + ############################################## + # PRIVATE METHODS # + ############################################## + + def DEB(self, str): + """ Print debugging message to stderr if debugging is enabled. + @hidden + """ + if self._debug: print >> sys.stderr, str + + def read(self, filename): + """ Read content of file and return it. Raise an error if a problem + occurs. + @hidden + """ + self.DEB("READING: " + filename) + try: + f = None + try: + f = open(filename, "r") + data = f.read() + except IOError, (errno, errstr): + raise TemplateError, "IO error while reading template '%s': "\ + "(%d) %s" % (filename, errno, errstr) + else: + return data + finally: + if f: f.close() + + def parse(self, template_data): + """ Parse the template. This method is recursively called from + within the include_templates() method. + + @return List of processing tokens. + @hidden + """ + if self._comments: + self.DEB("PREPROCESS: COMMENTS") + template_data = self.remove_comments(template_data) + tokens = self.tokenize(template_data) + if self._include: + self.DEB("PREPROCESS: INCLUDES") + self.include_templates(tokens) + return tokens + + def remove_comments(self, template_data): + """ Remove comments from the template data. + @hidden + """ + pattern = r"### .*" + return re.sub(pattern, "", template_data) + + def include_templates(self, tokens): + """ Process TMPL_INCLUDE statements. Use the include_level counter + to prevent infinite recursion. Record paths to all included + templates to self._include_files. + @hidden + """ + i = 0 + out = "" # buffer for output + skip_params = 0 + + # Process the list of tokens. + while 1: + if i == len(tokens): break + if skip_params: + skip_params = 0 + i += PARAMS_NUMBER + continue + + token = tokens[i] + if token == "." + self._include_level += 1 + if self._include_level > self._max_include: + # Do not include the template. + # Protection against infinite recursive includes. + skip_params = 1 + self.DEB("INCLUDE: LIMIT REACHED: " + filename) + else: + # Include the template. + skip_params = 0 + include_file = os.path.join(self._include_path, filename) + self._include_files.append(include_file) + include_data = self.read(include_file) + include_tokens = self.parse(include_data) + + # Append the tokens from the included template to actual + # position in the tokens list, replacing the TMPL_INCLUDE + # token and its parameters. + tokens[i:i+PARAMS_NUMBER+1] = include_tokens + i = i + len(include_tokens) + self.DEB("INCLUDED: " + filename) + continue # Do not increment 'i' below. 
+ i += 1 + # end of the main while loop + + if self._include_level > 0: self._include_level -= 1 + return out + + def tokenize(self, template_data): + """ Split the template into tokens separated by template statements. + The statements itself and associated parameters are also + separately included in the resulting list of tokens. + Return list of the tokens. + + @hidden + """ + self.DEB("TOKENIZING TEMPLATE") + # NOTE: The TWO double quotes in character class in the regexp below + # are there only to prevent confusion of syntax highlighter in Emacs. + pattern = r""" + (?:^[ \t]+)? # eat spaces, tabs (opt.) + (< + (?:!--[ ])? # comment start + space (opt.) + /?TMPL_[A-Z]+ # closing slash / (opt.) + statement + [ a-zA-Z0-9""/.=:_\\-]* # this spans also comments ending (--) + >) + [%s]? # eat trailing newline (opt.) + """ % os.linesep + rc = re.compile(pattern, re.VERBOSE | re.MULTILINE) + split = rc.split(template_data) + tokens = [] + for statement in split: + if statement.startswith(" 0 and '=' not in params[0]: + # implicit identifier + name = params[0] + del params[0] + else: + # explicit identifier as a 'NAME' parameter + name = self.find_param("NAME", params) + self.DEB("TOKENIZER: NAME: " + str(name)) + return name + + def find_param(self, param, params): + """ Extract value of parameter from a statement. + @hidden + """ + for pair in params: + name, value = pair.split("=") + if not name or not value: + raise TemplateError, "Syntax error in template." + if name == param: + if value[0] == '"': + # The value is in double quotes. + ret_value = value[1:-1] + else: + # The value is without double quotes. + ret_value = value + self.DEB("TOKENIZER: PARAM: '%s' => '%s'" % (param, ret_value)) + return ret_value + else: + self.DEB("TOKENIZER: PARAM: '%s' => NOT DEFINED" % param) + return None + + +############################################## +# CLASS: Template # +############################################## + +class Template: + """ This class represents a compiled template. + + This class provides storage and methods for the compiled template + and associated metadata. It's serialized by pickle if we need to + save the compiled template to disk in a precompiled form. + + You should never instantiate this class directly. Always use the + TemplateManager or TemplateCompiler classes to + create the instances of this class. + + The only method which you can directly use is the is_uptodate + method. + """ + + def __init__(self, version, file, include_files, tokens, compile_params, + debug=0): + """ Constructor. + @hidden + """ + self._version = version + self._file = file + self._tokens = tokens + self._compile_params = compile_params + self._debug = debug + self._mtime = None + self._include_mtimes = {} + + if not file: + self.DEB("TEMPLATE WAS COMPILED FROM A STRING") + return + + # Save modifitcation time of the main template file. + if os.path.isfile(file): + self._mtime = os.path.getmtime(file) + else: + raise TemplateError, "Template: file does not exist: '%s'" % file + + # Save modificaton times of all included template files. + for inc_file in include_files: + if os.path.isfile(inc_file): + self._include_mtimes[inc_file] = os.path.getmtime(inc_file) + else: + raise TemplateError, "Template: file does not exist: '%s'"\ + % inc_file + + self.DEB("NEW TEMPLATE CREATED") + + def is_uptodate(self, compile_params=None): + """ Check whether the compiled template is uptodate. + + Return true if this compiled template is uptodate. 
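+
+        The intended recompile-if-stale pattern for long-running callers
+        (a sketch; 'manager' and the template are as in the examples above):
+
+            if not template.is_uptodate():
+                template = manager.update(template)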
+ Return false, if the template source file was changed on the + disk since it was compiled. + Works by comparison of modification times. + Also takes modification times of all included templates + into account. + + @header is_uptodate(compile_params=None) + @return True if the template is uptodate, false otherwise. + + @param compile_params Only for internal use. + Do not use this optional parameter. It's intended only for + internal use by the TemplateManager. + """ + if not self._file: + self.DEB("TEMPLATE COMPILED FROM A STRING") + return 0 + + if self._version != __version__: + self.DEB("TEMPLATE: VERSION NOT UPTODATE") + return 0 + + if compile_params != None and compile_params != self._compile_params: + self.DEB("TEMPLATE: DIFFERENT COMPILATION PARAMS") + return 0 + + # Check modification times of the main template and all included + # templates. If the included template no longer exists, then + # the problem will be resolved when the template is recompiled. + + # Main template file. + if not (os.path.isfile(self._file) and \ + self._mtime == os.path.getmtime(self._file)): + self.DEB("TEMPLATE: NOT UPTODATE: " + self._file) + return 0 + + # Included templates. + for inc_file in self._include_mtimes.keys(): + if not (os.path.isfile(inc_file) and \ + self._include_mtimes[inc_file] == \ + os.path.getmtime(inc_file)): + self.DEB("TEMPLATE: NOT UPTODATE: " + inc_file) + return 0 + else: + self.DEB("TEMPLATE: UPTODATE") + return 1 + + def tokens(self): + """ Get tokens of this template. + @hidden + """ + return self._tokens + + def file(self): + """ Get filename of the main file of this template. + @hidden + """ + return self._file + + def debug(self, debug): + """ Get debugging state. + @hidden + """ + self._debug = debug + + ############################################## + # PRIVATE METHODS # + ############################################## + + def __getstate__(self): + """ Used by pickle when the class is serialized. + Remove the 'debug' attribute before serialization. + @hidden + """ + dict = copy.copy(self.__dict__) + del dict["_debug"] + return dict + + def __setstate__(self, dict): + """ Used by pickle when the class is unserialized. + Add the 'debug' attribute. + @hidden + """ + dict["_debug"] = 0 + self.__dict__ = dict + + + def DEB(self, str): + """ Print debugging message to stderr. + @hidden + """ + if self._debug: print >> sys.stderr, str + + +############################################## +# EXCEPTIONS # +############################################## + +class TemplateError(Exception): + """ Fatal exception. Raised on runtime or template syntax errors. + + This exception is raised when a runtime error occurs or when a syntax + error in the template is found. It has one parameter which always + is a string containing a description of the error. + + All potential IOError exceptions are handled by the module and are + converted to TemplateError exceptions. That means you should catch the + TemplateError exception if there is a possibility that for example + the template file will not be accesssible. + + The exception can be raised by constructors or by any method of any + class. + + The instance is no longer usable when this exception is raised. + """ + + def __init__(self, error): + """ Constructor. + @hidden + """ + Exception.__init__(self, "Htmltmpl error: " + error) + + +class PrecompiledError(Exception): + """ This exception is _PRIVATE_ and non fatal. + @hidden + """ + + def __init__(self, template): + """ Constructor. 
+        @hidden
+        """
+        Exception.__init__(self, template)
+
diff --git a/DJAGEN/trunk/djagen/gezegen/planet/sanitize.py b/DJAGEN/trunk/djagen/gezegen/planet/sanitize.py
new file mode 100755
index 0000000..c98b14d
--- /dev/null
+++ b/DJAGEN/trunk/djagen/gezegen/planet/sanitize.py
@@ -0,0 +1,354 @@
+"""
+sanitize: bringing sanitiy to world of messed-up data
+"""
+
+__author__ = ["Mark Pilgrim <http://diveintomark.org/>",
+              "Aaron Swartz <http://www.aaronsw.com/>"]
+__contributors__ = ["Sam Ruby <http://intertwingly.net/>"]
+__license__ = "BSD"
+__version__ = "0.25"
+
+_debug = 0
+
+# If you want sanitize to automatically run HTML markup through HTML Tidy, set
+# this to 1. Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html>
+# or utidylib <http://utidylib.berlios.de/>.
+TIDY_MARKUP = 0
+
+# List of Python interfaces for HTML Tidy, in order of preference. Only useful
+# if TIDY_MARKUP = 1
+PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"]
+
+import sgmllib, re
+
+# chardet library auto-detects character encodings
+# Download from http://chardet.feedparser.org/
+try:
+    import chardet
+    if _debug:
+        import chardet.constants
+        chardet.constants._debug = 1
+
+    _chardet = lambda data: chardet.detect(data)['encoding']
+except:
+    chardet = None
+    _chardet = lambda data: None
+
+class _BaseHTMLProcessor(sgmllib.SGMLParser):
+    elements_no_end_tag = ['area', 'base', 'basefont', 'br', 'col', 'frame', 'hr',
+      'img', 'input', 'isindex', 'link', 'meta', 'param']
+
+    _r_barebang = re.compile(r'<!((?!DOCTYPE|--|\[))')
+    _r_bareamp = re.compile("&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)")
+    _r_shorttag = re.compile(r'<([^<\s]+?)\s*/>')
+
+    def __init__(self, encoding):
+        self.encoding = encoding
+        if _debug: sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding)
+        sgmllib.SGMLParser.__init__(self)
+
+    def reset(self):
+        self.pieces = []
+        sgmllib.SGMLParser.reset(self)
+
+    def _shorttag_replace(self, match):
+        tag = match.group(1)
+        if tag in self.elements_no_end_tag:
+            return '<' + tag + ' />'
+        else:
+            return '<' + tag + '></' + tag + '>'
+
+    def feed(self, data):
+        data = self._r_barebang.sub(r'&lt;!\1', data)
+        data = self._r_bareamp.sub("&amp;", data)
+        data = self._r_shorttag.sub(self._shorttag_replace, data)
+        if self.encoding and type(data) == type(u''):
+            data = data.encode(self.encoding)
+        sgmllib.SGMLParser.feed(self, data)
+
+    def normalize_attrs(self, attrs):
+        # utility method to be called by descendants
+        attrs = [(k.lower(), v) for k, v in attrs]
+        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
+        return attrs
+
+    def unknown_starttag(self, tag, attrs):
+        # called for each start tag
+        # attrs is a list of (attr, value) tuples
+        # e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
    +        if _debug: sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag)
    +        uattrs = []
    +        # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
    +        for key, value in attrs:
    +            if type(value) != type(u''):
    +                value = unicode(value, self.encoding)
    +            uattrs.append((unicode(key, self.encoding), value))
    +        strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs]).encode(self.encoding)
    +        if tag in self.elements_no_end_tag:
    +            self.pieces.append('<%(tag)s%(strattrs)s />' % locals())
    +        else:
    +            self.pieces.append('<%(tag)s%(strattrs)s>' % locals())
    +
    +    def unknown_endtag(self, tag):
+        # called for each end tag, e.g. for </pre>, tag will be 'pre'
+        # Reconstruct the original end tag.
+        if tag not in self.elements_no_end_tag:
+            self.pieces.append("</%(tag)s>" % locals())
+
+    def handle_charref(self, ref):
+        # called for each character reference, e.g. for '&#160;', ref will be '160'
+        # Reconstruct the original character reference.
+        self.pieces.append('&#%(ref)s;' % locals())
+
+    def handle_entityref(self, ref):
+        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
+        # Reconstruct the original entity reference.
+        self.pieces.append('&%(ref)s;' % locals())
+
+    def handle_data(self, text):
+        # called for each block of plain text, i.e. outside of any tag and
+        # not containing any character or entity references
+        # Store the original text verbatim.
+        if _debug: sys.stderr.write('_BaseHTMLProcessor, handle_text, text=%s\n' % text)
+        self.pieces.append(text)
+
+    def handle_comment(self, text):
+        # called for each HTML comment, e.g. <!-- insert Python code here -->
+        # Reconstruct the original comment.
+        self.pieces.append('<!--%(text)s-->' % locals())
+
+    def handle_pi(self, text):
+        # called for each processing instruction, e.g. <?instruction>
+        # Reconstruct original processing instruction.
+        self.pieces.append('<?%(text)s>' % locals())
+
+    def handle_decl(self, text):
+        # called for the DOCTYPE, if present, e.g.
+        # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
+        #     "http://www.w3.org/TR/html4/loose.dtd">
+        # Reconstruct original DOCTYPE
+        self.pieces.append('<!%(text)s>' % locals())
+
+    _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
+    def _scan_name(self, i, declstartpos):
+        rawdata = self.rawdata
+        n = len(rawdata)
+        if i == n:
+            return None, -1
+        m = self._new_declname_match(rawdata, i)
+        if m:
+            s = m.group()
+            name = s.strip()
+            if (i + len(s)) == n:
+                return None, -1  # end of buffer
+            return name.lower(), m.end()
+        else:
+            self.handle_data(rawdata)
+#            self.updatepos(declstartpos, i)
+            return None, -1
+
+    def output(self):
+        '''Return processed HTML as a single string'''
+        return ''.join([str(p) for p in self.pieces])
+
+class _HTMLSanitizer(_BaseHTMLProcessor):
+    acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'b', 'big',
+        'blockquote', 'br', 'button', 'caption', 'center', 'cite', 'code', 'col',
+        'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt', 'em', 'fieldset',
+        'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input',
+        'ins', 'kbd', 'label', 'legend', 'li', 'map', 'menu', 'ol', 'optgroup',
+        'option', 'p', 'pre', 'q', 's', 'samp', 'select', 'small', 'span', 'strike',
+        'strong', 'sub', 'sup', 'table', 'textarea', 'tbody', 'td', 'tfoot', 'th',
+        'thead', 'tr', 'tt', 'u', 'ul', 'var']
+
+    acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
+        'action', 'align', 'alt', 'axis', 'border', 'cellpadding', 'cellspacing',
+        'char', 'charoff', 'charset', 'checked', 'cite', 'class', 'clear', 'cols',
+        'colspan', 'color', 'compact', 'coords', 'datetime', 'dir', 'disabled',
+        'enctype', 'for', 'frame', 'headers', 'height', 'href', 'hreflang', 'hspace',
+        'id', 'ismap', 'label', 'lang', 'longdesc', 'maxlength', 'media', 'method',
+        'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'prompt', 'readonly',
+        'rel', 'rev', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size',
+        'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title', 'type',
+        'usemap', 'valign', 'value', 'vspace', 'width']
+
+    ignorable_elements = ['script', 'applet', 'style']
+
+    def reset(self):
+        _BaseHTMLProcessor.reset(self)
+        self.tag_stack = []
+        self.ignore_level = 0
+
+    def feed(self, data):
+        _BaseHTMLProcessor.feed(self, data)
+        while self.tag_stack:
+            _BaseHTMLProcessor.unknown_endtag(self,
self.tag_stack.pop()) + + def unknown_starttag(self, tag, attrs): + if tag in self.ignorable_elements: + self.ignore_level += 1 + return + + if self.ignore_level: + return + + if tag in self.acceptable_elements: + attrs = self.normalize_attrs(attrs) + attrs = [(key, value) for key, value in attrs if key in self.acceptable_attributes] + if tag not in self.elements_no_end_tag: + self.tag_stack.append(tag) + _BaseHTMLProcessor.unknown_starttag(self, tag, attrs) + + def unknown_endtag(self, tag): + if tag in self.ignorable_elements: + self.ignore_level -= 1 + return + + if self.ignore_level: + return + + if tag in self.acceptable_elements and tag not in self.elements_no_end_tag: + match = False + while self.tag_stack: + top = self.tag_stack.pop() + if top == tag: + match = True + break + _BaseHTMLProcessor.unknown_endtag(self, top) + + if match: + _BaseHTMLProcessor.unknown_endtag(self, tag) + + def handle_pi(self, text): + pass + + def handle_decl(self, text): + pass + + def handle_data(self, text): + if not self.ignore_level: + text = text.replace('<', '') + _BaseHTMLProcessor.handle_data(self, text) + +def HTML(htmlSource, encoding='utf8'): + p = _HTMLSanitizer(encoding) + p.feed(htmlSource) + data = p.output() + if TIDY_MARKUP: + # loop through list of preferred Tidy interfaces looking for one that's installed, + # then set up a common _tidy function to wrap the interface-specific API. + _tidy = None + for tidy_interface in PREFERRED_TIDY_INTERFACES: + try: + if tidy_interface == "uTidy": + from tidy import parseString as _utidy + def _tidy(data, **kwargs): + return str(_utidy(data, **kwargs)) + break + elif tidy_interface == "mxTidy": + from mx.Tidy import Tidy as _mxtidy + def _tidy(data, **kwargs): + nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs) + return data + break + except: + pass + if _tidy: + utf8 = type(data) == type(u'') + if utf8: + data = data.encode('utf-8') + data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8") + if utf8: + data = unicode(data, 'utf-8') + if data.count(''): + data = data.split('>', 1)[1] + if data.count('' % self.url) + + def test_changedurl(self): + # change the URL directly + self.channel.url = self.changed_url + self.assertEqual(self.channel.feed_information(), + "<%s> (formerly <%s>)" % (self.changed_url, self.url)) + +if __name__ == '__main__': + unittest.main() diff --git a/DJAGEN/trunk/djagen/gezegen/planet/tests/test_main.py b/DJAGEN/trunk/djagen/gezegen/planet/tests/test_main.py new file mode 100755 index 0000000..c2be62d --- /dev/null +++ b/DJAGEN/trunk/djagen/gezegen/planet/tests/test_main.py @@ -0,0 +1,71 @@ +#!/usr/bin/env python +import os, sys, shutil, errno, unittest +from ConfigParser import ConfigParser +from StringIO import StringIO +import planet + +class MainTest(unittest.TestCase): + + def test_minimal(self): + configp = ConfigParser() + my_planet = planet.Planet(configp) + my_planet.run("Planet Name", "http://example.com", []) + + def test_onefeed(self): + configp = ConfigParser() + configp.readfp(StringIO("""[http://www.example.com/] +name = Mary +""")) + my_planet = planet.Planet(configp) + my_planet.run("Planet Name", "http://example.com", [], True) + + + def test_generateall(self): + configp = ConfigParser() + configp.readfp(StringIO("""[http://www.example.com/] +name = Mary +""")) + my_planet = planet.Planet(configp) + my_planet.run("Planet Name", "http://example.com", [], True) + basedir = os.path.join(os.path.dirname(os.path.abspath(sys.modules[__name__].__file__)), 'data') + 
os.mkdir(self.output_dir) + t_file_names = ['simple', 'simple2'] + self._remove_cached_templates(basedir, t_file_names) + t_files = [os.path.join(basedir, t_file) + '.tmpl' for t_file in t_file_names] + my_planet.generate_all_files(t_files, "Planet Name", + 'http://example.com/', 'http://example.com/feed/', 'Mary', 'mary@example.com') + for file_name in t_file_names: + name = os.path.join(self.output_dir, file_name) + content = file(name).read() + self.assertEqual(content, 'Mary\n') + + def _remove_cached_templates(self, basedir, template_files): + """ + Remove the .tmplc files and force them to be rebuilt. + + This is required mainly so that the tests don't fail in mysterious ways in + directories that have been moved, eg 'branches/my-branch' to + 'branches/mysterious-branch' -- the .tmplc files seem to remember their full + path + """ + for file in template_files: + path = os.path.join(basedir, file + '.tmplc') + try: + os.remove(path) + except OSError, e: + # we don't care about the file not being there, we care about + # everything else + if e.errno != errno.ENOENT: + raise + + def setUp(self): + super(MainTest, self).setUp() + self.output_dir = 'output' + + def tearDown(self): + super(MainTest, self).tearDown() + shutil.rmtree(self.output_dir, ignore_errors = True) + shutil.rmtree('cache', ignore_errors = True) + +if __name__ == '__main__': + unittest.main() diff --git a/DJAGEN/trunk/djagen/gezegen/planet/tests/test_sanitize.py b/DJAGEN/trunk/djagen/gezegen/planet/tests/test_sanitize.py new file mode 100755 index 0000000..f0f1d42 --- /dev/null +++ b/DJAGEN/trunk/djagen/gezegen/planet/tests/test_sanitize.py @@ -0,0 +1,125 @@ +# adapted from http://www.iamcal.com/publish/articles/php/processing_html_part_2/ +# and from http://feedparser.org/tests/wellformed/sanitize/ +# by Aaron Swartz, 2006, public domain + +import unittest, new +from planet import sanitize + +class SanitizeTest(unittest.TestCase): pass + +# each call to HTML adds a test case to SanitizeTest +testcases = 0 +def HTML(a, b): + global testcases + testcases += 1 + func = lambda self: self.assertEqual(sanitize.HTML(a), b) + method = new.instancemethod(func, None, SanitizeTest) + setattr(SanitizeTest, "test_%d" % testcases, method) + +## basics +HTML("","") +HTML("hello","hello") + +## balancing tags +HTML("hello","hello") +HTML("hello","hello") +HTML("hello","hello") +HTML("hello","hello") +HTML("hello","hello") +HTML("","") + +## trailing slashes +HTML('','') +HTML('','') +HTML('','') + +## balancing angle brakets +HTML('','b>') +HTML('','>') +HTML('foofoo','b>foo') +HTML('>') +HTML('b><','b>') +HTML('>','>') + +## attributes +HTML('','') +HTML('','') +HTML('','') + +## dangerous tags (a small sample) +sHTML = lambda x: HTML(x, 'safe description') +sHTML('safe description') +sHTML('safe description') +sHTML('safe description') +sHTML('safe description') +sHTML('safe description') +sHTML('safe') +sHTML('safe description') +sHTML('safe description') +sHTML('safe description') +sHTML('safe description') + +for x in ['onabort', 'onblur', 'onchange', 'onclick', 'ondblclick', 'onerror', 'onfocus', 'onkeydown', 'onkeypress', 'onkeyup', 'onload', 'onmousedown', 'onmouseout', 'onmouseover', 'onmouseup', 'onreset', 'resize', 'onsubmit', 'onunload']: + HTML('' % x, + '') + +HTML('never trust your upstream platypus', 'never trust your upstream platypus') + +## ignorables +HTML('foo', 'foo') + +## non-allowed tags +HTML('','') +HTML('\r\n\r\n\r\n\r\n\r\nfunction executeMe()\r\n{\r\n\r\n\r\n\r\n\r\n/* + + + +
    + + + + + + + + +
    +
    +

    Gezegen Arşivi

    + + {% if not archive_month %} + Gezegen arşivinde {{ archive_year }} yılına ait toplam, {{ entries_list|length }} adet girdi mevcuttur.

    + Aylar:
    + {% for entry in entries_list %} +{% ifchanged %} +|{{ entry.date|date:"F" }}| +
    +{% endifchanged %} + +{% endfor %} +{% else %} +Gezegen arşivinde {{ archive_year }} yılı, {{ archive_month }}. ayına ait toplam, {{ entries_list|length }} adet girdi mevcuttur.

    +{% endif %} + +
    + {% comment %} + + + {% endcomment %} + +
    +

    Takip edin

    + +
    + + + +
    +

    Diğer Gezegenler

    + +
    + +
    +

    Güncelleme

    +

    Gezegen her 10 dakikada bir yenilenir.

    +

    + Son güncelleme: +
    + {{ run_time.get_run_time }}

    +
    + +
    +

    İletişim

    +

    + Linux Gezegeni Gezegen Ekibi tarafından yönetilmektedir, Gezegen hakkındaki sorularınızı ve Gezegen'e iniş başvurularınızı e-posta ile iletebilirsiniz. +

    +

    + Gezegene iniş başvurularınızda Gezegen Kuralları'na uyan RSS/Atom beslemenizi ve gezegen içerisinde kullanmak istediğiniz (en fazla 80x80 çözünürlüklü) resminizi (bir başka deyişle hackergotchi); varsa jabber adresinizle birlikte e-posta yoluyla göndermenizi rica ediyoruz. +

    +
    + + {% ifnotequal p_entries_list.paginator.num_pages 1 %} + +
    +{% endifnotequal %} + + + + + + + + + + + diff --git a/DJAGEN/trunk/djagen/templates/main.tmpl b/DJAGEN/trunk/djagen/templates/main.tmpl new file mode 100755 index 0000000..d0b6a38 --- /dev/null +++ b/DJAGEN/trunk/djagen/templates/main.tmpl @@ -0,0 +1,326 @@ + + + + Linux Gezegeni + + + + + + + + + + + + + + + +
    + + +
    + + +
    + + {% for entry in entries_list|slice:items_per_page %} + {% autoescape off %} + + {% ifequal entry.entry_id.is_approved 1 %} + + {% ifchanged %}

    {{ entry.date|date:"d F Y" }}

    {% endifchanged %} + + +
    + + +
    +
    +
    +

    + {{ entry.title }} +

    +
    +
    +
    + + + {{ entry.content_html|truncatewords_html:truncate_words }} + +
    +
    +
    + + + + + + + + + +
    +
    + +
    +
    +
    + {% endifequal %} + + {% endautoescape %} + + {% endfor %} + + + + + + + + + + + + + + diff --git a/DJAGEN/trunk/djagen/templates/main/base.html b/DJAGEN/trunk/djagen/templates/main/base.html new file mode 100755 index 0000000..963c530 --- /dev/null +++ b/DJAGEN/trunk/djagen/templates/main/base.html @@ -0,0 +1,90 @@ +{% load i18n %} + + + + {% block head %} + + + + {% trans "Linux Gezegeni" %} + + + + + + + + + + + {% endblock %} + +
    +
    + RSS + Atom +
    +
    + +
    + +
    + {% block menu %} + + + {% endblock %} +
    + +

    Gezegen her 10 dakikada bir yenilenir. Son güncelleme: {{ run_time.get_run_time }}

    + +
    + + {% block body %} + {% endblock %} + + +
    + + {% block footer%} + + {% endblock %} + + + + + +
    + + diff --git a/DJAGEN/trunk/djagen/templates/main/feeds.html b/DJAGEN/trunk/djagen/templates/main/feeds.html new file mode 100755 index 0000000..f2bd421 --- /dev/null +++ b/DJAGEN/trunk/djagen/templates/main/feeds.html @@ -0,0 +1,26 @@ +
    + +
    diff --git a/DJAGEN/trunk/djagen/templates/main/index.html b/DJAGEN/trunk/djagen/templates/main/index.html new file mode 100755 index 0000000..35a41a3 --- /dev/null +++ b/DJAGEN/trunk/djagen/templates/main/index.html @@ -0,0 +1,915 @@ + + + + Linux Gezegeni + + + + + + + + + + + + + + + + + + + + + +
    + + +
    + + +
    +

    16 Mart 2010

    + +
    + + +
    +
    +
    +

    + +Yakın Doğu’da Seminer Rüzgarları +

    +
    +
    +
    +

Geçen haftadan beri Yakın Doğu Üniversitesi’nde Linux ve Özgür Yazılım seminerleri düzenliyoruz. İlk seminerimiz Linux Nedir? idi. Uzun zamandan beri dinlediğim en eğlenceli Linux Nedir’lerden birisi idi. 3.30 saat sürmesine rağmen konuşmacı Ali Erdinç Köroğlu’nun eğlenceli anlatımı ile zaman su gibi aktı denebilir.

    +

    Yakın Doğu’ya ilk geldiğim zamanlarda ben de bir Linux Nedir semineri vermiştim. O zamanki katılımcı durumu işte aşağıdaki gibi idi.

    +

    +

    Aslında çoğunluğunu öğretmenlerin oluşturması gereken katılımcı kitlesini, öğrenciler oluşturuyor idi. 1.30 saatlik bir anlatım ile katılımcılara Linux Nedir anlatmıştım. Seminer süresince hiç soru gelmemişti. Seminerden sonra yanıma gelip soru soranlar da olunca, epey mutlu olmuştum.

    +

    +

    Şimdiki durumda katılımcı sayısı azımsanmayacak kadar olması yanında, daha ilgili bir kalabalığın katıldığını düşünüyorum.

    +

    +

Ali Erdinç’in de epey bir eğlenceli anlatımı olduğunu, dinleyicinin dikkatini çekmek için onları arada dürttüğünü de belirtmek lazım.

    +

    +

    Bu seminerler dizisi Mayıs ayına kadar devam edecek. Meraklısı için üniversite duyuru sayfası, Facebook ve Twitter‘dan takip edebileceklerini söyleyelim. Hatta Kıbrıs’ta olanlar için üniversitede her Cuma akşamları ve Cumartesi günleri sinema gösterimleri yaptığımızı da belirtelim. Biz izlerken eğleniyoruz. Bekleriz.

    +

    Lefkoşa’ya bahar geldi denebilir. Oğuz Yarımtepe Kıbrıs’tan bildirdi.

    +

    +
    +
    +
    + + + + + + + + + +
    +
    + +
    +
    +
    +

    05 Şubat 2010

    + +
    + + +
    +
    +
    +

    + +100 ml +

    +
    +
    +
    +

1 Ocak 2010 tarihinden itibaren uçuşlarda sıvı kısıtlaması uygulanıyor. El bagajlarında 100 mlyi geçen sıvılara, jel, krem vb. el konuyor. Yılbaşında Kıbrıs’a dönerken başıma bir olay gelince ne menem bir şeydir diye araştırdığım bir konu oldu. Sırt çantamda 150 ml Dove krem var idi. Kremi epeydir kullanıyordum. Saat 5.30 gibi uçağa binmek için son kontrol noktasında bekliyorduk. Kontrol için güvenlik gelebilirsiniz dedikten sonra ben de çantayı cihaza bırakıp geçtim. Cihazın başındaki görevli bu çantada sıvı bir şey var diye seslendi bana. Ön gözde dedi. Evet krem var dedim. Açtı. Baktı 150 ml bir krem. 100 ml üzerini alamıyoruz dedi. Ben de uyku sersemi, o kullanılmış bir kutu, içindeki belki 100 mlnin altındadır dedim. Görevli gülümsedi. O da görevini yapıyordu. Ayakta duran bayan sert bir şekilde kutuyu aldı, baktı, bizim için bunun hacmi önemli dedi. Açıkçası tartışmanın bir anlamı yoktu. Onlar da bir kuralı uyguluyorlardı. Elimle söylendiği gibi para verip aldığım kremi çöpe attım.

    +

    Şimdi olayın benim açımdan garip noktalarına gelelim

    +

    * Güvenlik görevlisi kutunun kaç ml olduğuna üzerine bakarak karar verdi. Yani bir dahaki sefere sırf sistemi denemek için, aynı kremden alıp, üzerindeki 150 yi 100 yaparsam geçer muhtemelen.

    +

* Görevli içini açıp bakmadı bile. Bana sordu ne var diye. Yani içine şu epeydir ortalarda olmayan domuz gribi koysam dahi bilgisi olamazdı.

    +

    * Elimle çöpe attım, o çok koydu.

    +

    Ben de bunun üzerine Ulaştırma Bakanlığı’na bir eposta attım. Epostam Sivil Havacılık Dairesine yönlendirilmiş ve bir yanıt geldi. Kısaca, bu 100 ml uygulaması 10 Ağustos 2006′da İngiltere’de ortaya çıkan terörist plan sonrasında sivil havacılık gündemine girmiş. 6 Kasım 2006′da çıkarılan 1546/2006 tüzüğü ile tüm AB üyesi ülkelerde ve İsviçre, İzlanda ve Norveç’te, ABD ve Kanada’da uygulanmaya başlamış. Türkiye de ECAC üyesi bir devlet olduğundan tavsiye kararına uyarak bu uygulamayı diğer devletlerdeki gibi aynı kurallar dahilinde uygulamaya başlamış. Neden 100 ml peki? Birleşmiş Milletler bünyesindeki Patlayıcılar Çalışma Grubunda yapılan araştırma, testler ve risk değerlendirmesi neticesinde sıvıların 100 ml lik kaplarda 1 litreklik poşette taşınması halinde (1 lt’lik poşete yaklaşık 6 adet kap sığmaktaymış) uçuşun emniyetini tehlikeye düşürmeyeceği sonucuna varılmış. Bilim adamları araştırmış bulmuş, bir şey diyemeyecem bu konuda. Peki bizde 100 ml olduğu nasıl anlaşılıyor? Baya, ya size soruyorlar ya da üzerindeki yazıları okuyorlar. Yani ben teroristlik yapmak istesem, alırım 200 ml sıvı patlayıcı, koyarım Dove krem kutusuna, yazıları bir güzel 100 diye düzenlerim, sorarlarsa da 100 der geçerim. Peki biz neden illa 100 diye uyguluyoruz? E çünkü diğer ülkelerde de öyle de ondan. Epostadaki şu satırlara bakalım:

    +

    “Ülkemiz yukarıda adı geçen uluslarası kuruluşların aldığı kararları  ve berlilediği standartları uygulamakla yükümlüdür.”

    +

    Bu konudaki uygulama diğer ülkelerde hangi standartlarda uygulanıyor bilmiyorum. Belki de sadece Kıbrıs uçuşlarında bir acayiplik vardır. Standart denilen kavram sadece 100 sayısına bağlı bir şeydir diye de anlaşılıyor olabilir.

    +

    Siz siz olun, uçağa binerken el bagajınızda üzerinde 100 ml üzeri bir şey yazan herhangi bir kap bulundurmayın. İçi boş dolu farketmez.

    +
    +
    +
    + + + + + + + + + +
    +
    + +
    +
    +
    +

    29 Ocak 2010

    + +
    + + +
    +
    +
    +

    + +Artık Sun yok! +

    +
    +
    +
    +

iPad haberleri arasında kaybolup gidiyor ama Oracle uzun süren Sun’ı satın alma işlemini bitirdi. Artık www.sun.com adresine girdiğinizde sizi doğrudan Oracle sitesine yönlendiriyor.

    +

    Beni en çok ilgilendiren konular ise Sun’ın özgür yazılım projelerine devam edilip edilmeyeceği konularında ise şimdilik olumlu haberler geliyor. Bütün bu projeler içerisinde devam edilmeyeceği açıklanan tek proje şimdilik Kenai.

    +

    Umarım hepimiz için mutlu son olur…

    +

    Ek: Kültür Mantarı‘nın yönlendirmesi ile James Gosling’in bu konu ile ilgili blogunu gördüm ve ordaki görselin de burada saklanmasının iyi olacağını düşünüp buraya kopyaladım…

    +

    sunrip


    +
    +
    + + + + + + +
    +
    + +
    +
    +
    +

    24 Aralık 2009

    + +
    + + +
    +
    +
    +

    + +EMO 13. Ulusal Kongresi +

    +
    +
    +
    +

EMO’nun 23-26 Aralıkta ODTÜ de gerçekleşecek olan 13. Ulusal Kongresi kapsamında 25 Aralık Cuma günü 9:30-11:15 arasında Özgür Yazılım başlıklı özel oturumda “Özgür Yazılımların Uygulama Geliştirme Modeline Etkisi; Tekir’den Öğrendiklerimiz” ve 11.30-12.30 arasında da “Özgür Yazılımın Ekonomik ve Sosyal Yönleri” sunumlarını yapıyorum.

    +

    Genel olarak yüklü bir programı olan bu etkinlikte çeşitli LKD seminerleri de olacak. Buyrunuz geliniz!


    +
    +
    + + + + + + +
    +
    + +
    +
    +
    +

    24 Eylül 2009

    + +
    + + +
    +
    +
    +

    + +Intel, Atom, Moblin +

    +
    +
    +
    +

Intel Atom işlemcileri ile hayatın her yerinde yer almak istiyor. x86 tabanlı Atom işlemciler programcılara normal bilgisayarlar için yazılmış uygulamalarını çok fazla değişikliğe uğratmaya gerek kalmadan mobil cihazlarda çalıştırabilmesine olanak sağlıyor. Bu da Intel’e önemli avantajlar sağlıyor. Bu avantajını daha da arttırmak için cihazlar üzerinde performansı arttıracak işletim sistemi için de kolları sıvayıp Moblin’i geliştirmeye başlamışlardı. Dün bu konular üzerine Intel’den üç önemli açıklama oldu…

    +

    Atom işlemcili cihazlarda uygulama performansını arttırmak için yeni bir geliştirici programı başlattılar. Atom Developer Program‘ı teşvik etmek içinde bir yarışma başlattılar. Bence bir göz atmakta fayda var… ( Ben kayıt olacağım :) )

    +

    İkinci ve üçüncü açıklamalar ise bir arada geldi, Moblin’in yeni sürümü 2.1 yayınlandı ve Atom işlemcili bir akıllı telefon üzerinde sunuldu. Intel bir çırpıda bir dolu firmaya rakip oldu :) Geçenlerde de yazmıştım,  önümüzdeki yıl içerisinde mobil dünyada bir dolu ilginç gelişmeler bekliyorum. Umarım bu rekabetten özgür yazılım ve biz kullanıcılar kazançlı çıkarız…


    +
    +
    + + + + + + +
    +
    + +
    +
    +
    +

    25 Ağustos 2009

    + +
    + + +
    +
    +
    +

    + +Teknik Destek Kopya Kağıtı +

    +
    +
    +
    +

    xkcd’de geçen gün yayınlanan bu teknik destek kopya kağıdını pek beğendim ve Türkçe bir sürümünü yapayım dedim.

    +

    teknikdestek
    +İsteyenler için ODF hali de burada


    +
    +
    + + + + + + +
    +
    + +
    +
    +
    +

    18 Ağustos 2009

    + +
    + + +
    +
    +
    +

    + +Korsan Değil “Fikir Hırsızı” +

    +
    +
    +
    +

Kültür ve Turizm Bakanlığı, Fikir ve Sanat Eserleri Kanunu’nda değişiklik yapılarak İnternet üzerinden müzik, film, kitap ve benzeri şeyleri indirenlerin de ceza almasını sağlamak için çalışma başlatmış. Bu denetimi yapabilmek için de internet servis sağlayıcıları ile birlikte çalışacaklarmış.

    +

Her köşe başında kurulan tezgahlarda kitap, cd, dvd satan arkadaşlar hiç bir sorun ile karşılaşmadan bu işi yaparken, bunun için bildiğim kadarıyla yasal düzenlemeler zaten var, onlarla mücadele etmek yerine, internetten akan trafiği denetleyecekler. Bu denetim sırasında da müzik mi yoksa sevgilinizden gelen e-postanızı mı indirdiğiniz fark etmeyecek, dinleyecekler. Ayrıca indirdiğiniz müziğin yasal mı yoksa yasa dışı mı olduğunu bir çırpıda anlayacaklar. Bu arada, haberden eğer yanlış okumadıysam, yapılan operasyonda makinenizde çıkan parça sayısı kadar da görevlilere prim verilecek :) Yani büyük birader izlemeye ve istediğinde yasaklamaya olanak sağlayacak yeni yollar arıyor…

    +

    Bütün bunlardan fikir haklarına saygı duymadığım anlaşılmasın tam tersine korsana, fikir hırsızlığına kesinlikle karşıyım. Fakat bütün bunların bahane edilerek kişisel iletişimin ihlal edilmesine daha çok karşıyım.

    +

    Son olarak bir haber daha verelim Pirate Bay’in 23 GB’lik arşivi de paylaşıma açılmış. Bu arşiv içerisinde yasal olmayan şeyler olabilir ama yasal olarak paylaşılan da bir çok eser var. Sizler yasal olanlarını indirin :) Korsan değil özgür yazılım kullanın!


    +
    +
    + + + + + + +
    +
    + +
    +
    +
    +

    07 Temmuz 2009

    + +
    + + +
    +
    +
    +

    + +Mobil Cihazlar Dünyasında Neler Oluyor? +

    +
    +
    +
    +

Bir süredir mobil cihazlar dünyası hareketlenmiş durumda. Apple iPhone ile birlikte mobil telefon dünyasında ciddi bir hareketlenme olmuştu. Palm, Nokia, Sony Ericsson, BlackBerry gibi sektörün önde gelen firmalarına Apple ciddi bir rakip olarak ortaya çıkmış ardından da Google Android ile bir platform olarak henüz yeterince destekleyen donanım olmasa bile vaadettikleri ile dikkatleri üzerine çekmişti. Android, WebOS ve iPhone OS‘a karşı Symbian‘ı savunmaya devam eden Nokia, elinde olmayan hisselerini de alarak, bir vakıf kurup Symbian’ı açık kaynak kodlu olarak bu vakfa devretmişti.

    +

    Tam da bu esnada Intel Atom işlemcisi ile düşük kaynak kullanan PC’lerin geliştirilmesine olanak sağladı ve NetBook’lar geçtiğimiz yıl içinde popüler cihazlar arasına girdiler.

    +

    Bu yıl ise Intel, Mobile Internet Device ( MID - Mobil Internet Aracı ) üzerine ciddi yatırımlar yapmaya başladı. Hatta bu cihazların cazibesini arttırmak için özel bir linux dağıtımına bile başladı : Moblin.

    +

    Moblin’e destek konusunda Intel önce Canonical ile anlaşmıştı. Daha sonra Canonical NetBook dağıtımı olarak Nokia’nın kendi tabletlerinde kullanmak amacıyla ürettiği Maemo‘yu desteklemeye karar verdiğini açıkladı. Intel’de Moblin’i Linux Vakfı’na devrettiğini ve destek konusunda da Novell’le anlaştığını ilan etti. İki hafta önce detayları belirtilmeyen bir Nokia - Intel anlaşması ilan edildi. Genel olarak yorumlanan ise  Nokia’nın daha becerikli telefonlar üretmek için Intel teknolojilerini kullanacağı bu arada da Moblin ile Maemo arasında bir seçim yapıp güçlerini birleştirecekleri yönündeydi. Bugün Nokia, Android temelli telefonlar üretmeyeceğini ve GTK+ temelli olan Maemo’yu Qt’ye taşıyacağını ilan etti.

    +

İşte benim sorularımın temelini de burası oluşturuyor. Qt temelli bir Maemo’yu Canonical desteklemeye devam edecek mi? Nokia Intel işlemcili MID’ler üretip bunlarda Maemo’yu mu koşturacak yoksa Intel işlemcili telefonlar üretip Symbian’ı rakipleri kadar becerikli yepyeni bir hale mi dönüştürecek? Intel MID’ler konusunda neler planlıyor? Bu planları içerisinde Moblin’i desteklemeye devam etmek var mı yoksa Nokia ile birlikte Maemo’ya yatırım mı yapacaklar? NetBook’larda da kullanılmaya başlayan Android de bu üretilecek donanımlar için bir alternatif olacak mı?

    +

    Hepsinden önemlisi bütün bunların sonucunda ortaya çıkacak olan, biz tüketiciler için ucuz ve becerikli donanımlar mı yoksa; bir biri ile uyumsuz bir dolu daha oyuncak mı?


    +
    +
    + + + + + + +
    +
    + +
    +
    +
    +

    17 Haziran 2009

    + +
    + + +
    +
    +
    +

    + +LKD Genel Kurulu için Ankara’ya +

    +
    +
    +
    +

    Bu hafta sonu gerçekleştirilecek LKD Genel Kurulu için Ankara’ya gidiyoruz. Aşağıdaki yazı bu yolculuk için organizasyon yapmaya çalışan  Volkan’dan…

    +

    ***

    +

    Ankara yerine Bağdata gitsem bu kadar koştururdum herhalde,

    +

TCDD : en teknolojik YHT çalıştıran, 5 saat 28 dk Ankaraya ulaştıran koskoca
    +kurum.
+Evet bu kurum maalesef bilet satmak istemiyor.

    +

    1- web sitesi windows ve Internet explorer bağımlısı. Öncelikle böyle bir
    +sisteme sahip olmanız gerekli. (MAC ve Linux kullanıcıları tren yolcuları
    +portföyünde yer almıyor. Onlar uçak veya otobüs severler.!)

    +

    2- web sitesindeki bilet satış uygulamasında banka sıra makinelerinin bir
    +türevi sadece. Sıradan boş koltuk veriyor. Pulman vagonlarında ilk 6 koltuk
    +karşılıklı bakar durumda, son 3 koltukda geriye yatamaz durumda. Bilin
    +bakalım verdiği ilk koltuklar nereleri ? Evet bildiniz bunlar. Farklı bir
    +koltuk veya vagon seçemiyorsunuz. Seçilebilecek şeyler bayan yanı ve
    +internet. Onlarında ne kadar gerçek seçimlere izin verildiği şüpheli.
    +(İnternet olsun dedim, sonuç yok dedi.)

    +

    3- PTT şubeleri tren bilet satacak acenteler olarak duyuruluyor. Gidiyorsunuz,
    +veee… Evet, biz satıyoruz, ama siteye girebilirsek diyorlar. Ne oldu, tabii
    +ki giremediler. 10dk sıra beklediniğiniz için teşekkür ederiz.

    +

    4- Acente, komisyon karşılığı TCDD bileti satanlar. Gidiyoruz birine, bize
    +bilet lazım satabiliyor musunuz? diye soruyorum. Tabii buyrun diyorlar. Gidiş
    +dönüş 1 tam 1 öğrenci istiyorum. Satıcı önce
    +- G/D kesmiyorum diyor buradan.!
    +- Nasıl yani?
    +- Fark yok zaten,ayrı ayrı keseyim. Fiyatı farklı mı ki?
    +Başka bir arkadaşı düzeltiyor, aynı express olursa kesebilirsin.
    +- Elbette G/D niye alayım indirim var diyorum.
    +Neyse girip deniyor, gelen koltuk numaralarını soruyorum.
    +- 4,5 diyor. (İlk altı koltuk içinden boş olanlar)
    +- Değiştiremiyor musunuz?
+- Maalesef.
    +- Internet sürümüne mi giriyorsunuz diyorum ister istemez.
    +- Hayır biz acente olarak giriyoruz ama fark yok. cevabı geliyor. (Tahmininen
    +üzerine ek komisyon ekleniyor sadece.)
    +- Kim koltuk seçtiriyor bana ?
    +- Gardan alabilirsiniz, Haydarpaşa veya Sirkeci.

    +

    5- Rotamız Sirkeci garı. Bir otobüs ve tramvay ile ulaşıyorum.
    +Bende dil yandı ya, ilk soru Fatih expresine bilet istiyorum, ama koltuk
    +seçebiliyor musunuz?
    +- Bakalım yer boş olursa seçebiliriz diyor satıcı bu kez.
    +- Ohh nihayet.
    +- 1 tam 1 öğrenci G/D istiyorum, artı 1 öğrenci sadece gidiş.
    +- Öğrencide G/D farkı yok cevabı geliyor.
    +- Biliyorum, tam da var onun için söylüyorum.(Bilgi: Tam bileti G/D alırsanız
    +öğrenci bileti ile aynı fiyat, garip.G/D alacaksanız öğrenciliğiniz işe
+yaramıyor. Yani pasoya gerek yok. Tespit: Öğrenciler hep tek yön seyahat
    +eder.)
    +- Kredi kartımı, peşin mi?
    +- DIINN ! kredi kartı.. var dimi?
    +- Evet, 112 TL
    +- Buyrun, zııttt pıırtt iki tak tak bi laklak biletler ve pos slipi elimde.

    +

    Gişenin önünden ayrılmadan biletleri tren, tarih, yer vs. doğru mu diye
    +kontrol ediyorum. Elimde biletler teşekkür edip ayrılırken, 1,5 saatte ancak
    +bir alış veriş yapmış oluyorum.  Daha bir de geri dönüş yolu var.

    +

    Velhasıl,
    +Gidiş : 18/06/2009 Perşembe 23:30 Haydarpaşa Vagon:X Koltuk: XX-XX-XX
    +Gidiş : 20/06/2009 Cumartesi 23:30 Ankara Vagon:X Koltuk: XX-XX

    +

    Hayırlı yolculuklar.

    +

    =====================
    +Dipnot-1: Bu yerleri aldığım 1. vagon haricinde 2 vagon tamamen boş görünüyor
    +daha. 2-3 nolarda satılan yerler var.

    +

    Dipnot-2: Ben telefonla iş yapmaya alışamamış biri olarak, rezervasyon veya
    +satış işlemi var mı diye hiç peşine düşmedim. Orada da farklı bir macera sizi
    +bekliyor olabilir, kimbilir?

    +

    Dipnot-3: Yataklı vagonlarda alt-üst yatak seçme şansınız olabilir mi sizce?


    +
    +
    + + + + + + +
    +
    + +
    +
    +
    +

    16 Haziran 2009

    + +
    + + +
    +
    +
    +

    + +IE, WTW ve Gıda Yardımı +

    +
    +
    +
    +

Bugünlerde dünya üzerindeki açlık, gıda yardımları ve bunlara ait haberler her zamankinden daha fazla ilgimi çekiyor. Dolayısıyla Microsoft’un yeni kampanyası ilgimi çekti. Microsoft İnternet Tarayıcısının yeni sürümünü daha iyi duyurabilmek için gıda yardımı üzerine kurulu bir kampanya başlatmış. IE8′in her tam indirilmesine karşılık 8 öğün yemek bağışında bulunacakmış. Detaylara buradan ulaşabilirsiniz…

    +

    Bu konu ile ilgili de bir dolu tartışma gündeme geldi tabii ki, örneğin TechCrunch‘da kampanyaya dair bir dolu yazı ve tartışma var. Ben kendi adıma Linux üzerinde zaten çalışmayan bu tarayıcıyı indirip biraz ağ zamanı harcayıp bağışta bulunsam mı, zaten IE kullananların hatalı çalışan eski sürümler yerine CSS ve JS ile ilgili bir dolu hatanın düzeltildiği bu yeni sürüme geçmelerini teşvik etsem mi, yoksa hiç sesimi çıkarmasam mı bilemedim. Ardından da bu haberi bahane edip daha fazlası ile yazayım dedim.

    +

    İster IE8 indirin isterseniz aşağıdaki organizasyonların sitelerini ziyaret edip dünya üzerindeki açlık ve fakirlikle mücadeleye katkıda bulunmak için yapabileceklerinizi öğrenin… Bunların içerisinde özellikle Birleşmiş Milletler Dünya Gıda Programı’nın Walk The Web kampanyasına bir göz atmanızı öneririm…

    + +

    Son olarak da bugünlerde herkese önerdiğim gibi Yuva ( Home ) belgeselini izlemenizi şiddetle tavsiye ederim.


    +
    +
    + + + + + + +
    +
    + +
    +
    +
    +

    28 Mayıs 2009

    + +
    + + +
    +
    +
    +

    + +TBD Bilişim Kongresi’nde Özgür Yazılım Paneli +

    +
    +
    +
    +

    TBD’nin bu yıl 3.sünü düzenlediği İstanbul Bilişim Kongresi‘nde Pazar günü saat 14:00′de Özgür Yazılım Paneli olacaktır. Panel’de özgür yazılım ve iş modelleri üzerinde durulacaktır. İlgilenenlere duyurulur…

    +

    Yer: Marmara Üniversitesi Nişantaşı Kampüsü
    +Erdal İnönü Bilim ve Kültür Merkezi
    +Tarih: 31 Mayıs Pazar, 14:00 - 15:20
    +Oturum başkanı: Görkem Çetin
    +Konuşmacılar: Enver Altın, Hakan Uygun, Cahit Cengizhan


    +
    +
    + + + + + + +
    +
    + +
    +
    +
    +

    13 Nisan 2009

    + +
    + + +
    +
    +
    +

    + +Sıralama Algoritmaları +

    +
    +
    +
    +

    Sıralama algoritmaları, programcılığa girişi oluşturan en temel şeylerdendir. Özellikle aynı problemin çözümü için farklı yöntemlerin nasıl farklı sonuçlar verdiğini görmek için şahane örneklerdir. Daha da iyisi bu farklı algoritmaların görsel olarak karşılaştırmasıdır. İşte tam da bu işi başarıyla yapan bu siteye bakmanızı şiddetle tavsiye ederim. Sadece farklı algoritmaları görsel karşılaştırmasını değil, her algoritmanın farklı veri kümelerinde davranış biçimini ve detaylı karşılaştırmalarını da bulabilirsiniz…
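A rough sketch of that point (my own illustration, not taken from the linked site): simply counting comparisons makes the behavioural difference between two classic algorithms visible even without any graphics. All names and numbers below are illustrative only.

def insertion_sort(items):
    # returns (sorted copy, comparison count); quadratic on reversed input
    data, comparisons = list(items), 0
    for i in range(1, len(data)):
        j = i
        while j > 0:
            comparisons += 1
            if data[j - 1] <= data[j]:
                break
            data[j - 1], data[j] = data[j], data[j - 1]
            j -= 1
    return data, comparisons

def merge_sort(items):
    # returns (sorted copy, comparison count); about n*log(n) on any input
    count = [0]
    def merge(left, right):
        merged, i, j = [], 0, 0
        while i < len(left) and j < len(right):
            count[0] += 1
            if left[i] <= right[j]:
                merged.append(left[i]); i += 1
            else:
                merged.append(right[j]); j += 1
        return merged + left[i:] + right[j:]
    def sort(seq):
        if len(seq) <= 1:
            return list(seq)
        mid = len(seq) // 2
        return merge(sort(seq[:mid]), sort(seq[mid:]))
    return sort(list(items)), count[0]

# sorted input: insertion sort needs only n-1 comparisons; reversed: ~n*n/2
for name, data in [("sorted", range(100)), ("reversed", range(99, -1, -1))]:
    print("%-8s insertion=%5d merge=%4d" % (name, insertion_sort(data)[1], merge_sort(data)[1]))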


    +
    +
    + + + + + + +
    +
    + +
    +
    +
    +
    + + + + + + + + + + + + + + diff --git a/DJAGEN/trunk/djagen/templates/main/main.html b/DJAGEN/trunk/djagen/templates/main/main.html new file mode 100755 index 0000000..6c178fa --- /dev/null +++ b/DJAGEN/trunk/djagen/templates/main/main.html @@ -0,0 +1,36 @@ +{% extends "main/base.html" %} + + + {% block body %} + + {% for entry in entries_list|slice:items_per_page %} + {% autoescape off %} + + {% ifequal entry.entry_id.is_approved 1 %} + + {% ifchanged entry.date.day entry.date.month entry.date.year %}
    {% endifchanged %} + + {% ifchanged %}

    {{ entry.date|date:"d F Y" }}

    {% endifchanged %} + +
    + + +

    {{ entry.title }}

    +

    + Yazar: {{ entry.entry_id.author_name }} + Tarih: {{ entry.date|date:"d F Y H:i" }} +

    +
    + {{ entry.content_html|truncatewords_html:truncate_words }} +
    + {% endifequal %} + {% endautoescape %} + +
    + + + {% endfor %} + + + {% endblock %} + diff --git a/DJAGEN/trunk/djagen/templates/main/members.html b/DJAGEN/trunk/djagen/templates/main/members.html new file mode 100755 index 0000000..93eb28a --- /dev/null +++ b/DJAGEN/trunk/djagen/templates/main/members.html @@ -0,0 +1,16 @@ +{% extends "main/base.html" %} + + {% block body %} + + + + + {% endblock %} diff --git a/DJAGEN/trunk/djagen/templates/main/query.html b/DJAGEN/trunk/djagen/templates/main/query.html new file mode 100755 index 0000000..c3a3f25 --- /dev/null +++ b/DJAGEN/trunk/djagen/templates/main/query.html @@ -0,0 +1,11 @@ +{% extends "main/base.html" %} +{% block body %} +
    + Yazar
    Adı:
    + ve/veya + Soyadı:
    + veya
    + Aradığınız Metin: + +
    +{% endblock %} diff --git a/DJAGEN/trunk/djagen/templates/main/subscribe.html b/DJAGEN/trunk/djagen/templates/main/subscribe.html new file mode 100755 index 0000000..2e7722a --- /dev/null +++ b/DJAGEN/trunk/djagen/templates/main/subscribe.html @@ -0,0 +1,42 @@ +{% extends "main/base.html" %} + + {% block body %} +

    + Linux Gezegeni Gezegen Ekibi tarafından yönetilmektedir, Gezegen hakkındaki sorularınızı ve Gezegen'e iniş başvurularınızı e-posta ile iletebilirsiniz. +

    + +
    + +

    + Gezegene iniş başvurularınızda Gezegen Kuralları'na uyan RSS/Atom beslemenizi ve gezegen içerisinde kullanmak istediğiniz (en fazla 80x80 çözünürlüklü) fotoğrafınızı (bir başka deyişle hackergotchi); varsa jabber adresini aşağıdaki formu kullanarak göndermenizi rica ediyoruz. +

    + +
    + + {% ifnotequal submit 'done' %} + +

    Üye Başvuru Formu

    +
    + {% for field in form %} +
    + {% if field.errors %} + {{ field.errors }} + {% endif %} + {{ field.label_tag }} + {% if field.help_text %} + {{ field.help_text }} + {% endif %} + {{ field }} +
    + {% endfor %} +
    + +
    + {% else %} +

    + Kaydınız alındı. +

    + {% endifnotequal %} + + {% endblock %} + diff --git a/DJAGEN/trunk/djagen/testdir/__init__.py b/DJAGEN/trunk/djagen/testdir/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/DJAGEN/trunk/djagen/testdir/deneme.py b/DJAGEN/trunk/djagen/testdir/deneme.py new file mode 100755 index 0000000..f0e5a5e --- /dev/null +++ b/DJAGEN/trunk/djagen/testdir/deneme.py @@ -0,0 +1,7 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +class Deneme: + + def test(self): + print "ok" diff --git a/DJAGEN/trunk/djagen/urls.py b/DJAGEN/trunk/djagen/urls.py new file mode 100755 index 0000000..d52023f --- /dev/null +++ b/DJAGEN/trunk/djagen/urls.py @@ -0,0 +1,35 @@ +from django.conf.urls.defaults import * +from djagen.collector.views import * +from djagen import settings + + +# Uncomment the next two lines to enable the admin: +from django.contrib import admin +admin.autodiscover() + +urlpatterns = patterns('', + + # Uncomment the admin/doc line below and add 'django.contrib.admindocs' + # to INSTALLED_APPS to enable admin documentation: + # (r'^admin/doc/', include('django.contrib.admindocs.urls')), + + # Uncomment the next line to enable the admin: + (r'^admin/', include(admin.site.urls)), + #(r'^archive/$',archive), + (r'^main/', 'djagen.collector.views.main'), + (r'^subscribe/', 'djagen.collector.views.member_subscribe'), + (r'^members/', 'djagen.collector.views.list_members'), + (r'^archive/$','djagen.collector.views.archive'), + (r'^archive/(?P\d{4})/$', archive), + (r'^archive/(?P\d{4})/(?P\d{1,2})/$', archive), + (r'^djagen/$',main), + (r'^query/$',query), + ) +urlpatterns += patterns('', + url(r'^captcha/', include('captcha.urls')), +) + + # For development server. + #(r'^(?P.*)$', 'django.views.static.serve', + # {'document_root': settings.BASEPATH + 'gezegen/www/'}), + diff --git a/DJAGEN/trunk/djagen/wsgi_handler.py b/DJAGEN/trunk/djagen/wsgi_handler.py new file mode 100755 index 0000000..419437f --- /dev/null +++ b/DJAGEN/trunk/djagen/wsgi_handler.py @@ -0,0 +1,11 @@ +import sys +import os + +# WSGI handler module. 
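+# (Added explanatory note: the sys.path line below puts the project's parent
+# directory on the import path so that the 'djagen.settings' module named in
+# DJANGO_SETTINGS_MODULE can be imported when mod_wsgi loads this file.)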
+ +sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/..') +os.environ['DJANGO_SETTINGS_MODULE'] = 'djagen.settings' + +import django.core.handlers.wsgi + +application = django.core.handlers.wsgi.WSGIHandler() \ No newline at end of file diff --git a/DJAGEN/yeni_tasarim/BrowserCompatible.js b/DJAGEN/yeni_tasarim/BrowserCompatible.js new file mode 100755 index 0000000..9645ff7 --- /dev/null +++ b/DJAGEN/yeni_tasarim/BrowserCompatible.js @@ -0,0 +1,278 @@ +var BrowserDetect = { + init: function(){ + this.browser = this.searchString(this.dataBrowser) || "An unknown browser"; + this.version = this.searchVersion(navigator.userAgent) || + this.searchVersion(navigator.appVersion) || + "an unknown version"; + }, + searchString: function(data){ + for (var i = 0; i < data.length; i++) { + var dataString = data[i].string; + var dataProp = data[i].prop; + this.versionSearchString = data[i].versionSearch || data[i].identity; + if (dataString) { + if (dataString.indexOf(data[i].subString) != -1) + return data[i].identity; + } + else + if (dataProp) + return data[i].identity; + } + }, + searchVersion: function(dataString){ + var index = dataString.indexOf(this.versionSearchString); + if (index == -1) + return; + return parseFloat(dataString.substring(index + this.versionSearchString.length + 1)); + }, + dataBrowser: [{ + string: navigator.userAgent, + subString: "Chrome", + identity: "Chrome" + }, + { + string: navigator.vendor, + subString: "Apple", + identity: "Safari" + }, { + prop: window.opera, + identity: "Opera" + }, { + string: navigator.userAgent, + subString: "Flock", + identity: "Flock" + }, { + string: navigator.userAgent, + subString: "Firefox", + identity: "Firefox" + }, { + string: navigator.userAgent, + subString: "MSIE", + identity: "IExplorer", + versionSearch: "MSIE" + }] +}; +var BrowserCompatible = { + check: function(){ + BrowserDetect.init(); + if ((this.useBlackList && this.unCompatibleBrowsers[BrowserDetect.browser] && BrowserDetect.version <= this.unCompatibleBrowsers[BrowserDetect.browser]) || + (!this.useBlackList && (BrowserDetect.version < this.compatibleBrowsers[BrowserDetect.browser] || !this.compatibleBrowsers[BrowserDetect.browser]))) { + if (!this.readCookie('browsercheck_dontShowAgain')) + this.showWarning(); + } + }, + getStyle: function(el, styleProp){ + var x = el; + if (x.currentStyle) + var y = x.currentStyle[styleProp]; + else + if (window.getComputedStyle) + var y = document.defaultView.getComputedStyle(x, null).getPropertyValue(styleProp); + return y; + }, + createCookie: function(name, value, days){ + if (days) { + var date = new Date(); + date.setTime(date.getTime() + (days * 24 * 60 * 60 * 1000)); + var expires = ";expires=" + date.toGMTString(); + } + else + var expires = ""; + document.cookie = name + "=" + value + expires + ";path=/"; + }, + + readCookie: function(name){ + var nameEQ = name + "="; + var ca = document.cookie.split(';'); + for (var i = 0; i < ca.length; i++) { + var c = ca[i]; + while (c.charAt(0) == ' ') + c = c.substring(1, c.length); + if (c.indexOf(nameEQ) == 0) + return c.substring(nameEQ.length, c.length); + } + return null; + }, + + eraseCookie: function(name){ + this.createCookie(name, "", -1); + }, + showWarning: function(){ + if(!this.lang){ + this.lang=navigator.language || navigator.browserLanguage; + if(!this.langTranslations[this.lang]) this.lang="en"; + } + var bg = document.createElement("div"); + bg.id = "browsercheck_bg"; + bg.style["background"] = "#fff"; + bg.style["filter"] = "alpha(opacity=90)"; + 
bg.style["-moz-opacity"] = "0.90"; + bg.style["opacity"] = "0.9"; + bg.style["position"] = "fixed"; + if (BrowserDetect.browser == "IExplorer" && BrowserDetect.version < 7) + bg.style["position"] = "absolute"; + bg.style["z-index"] = "9998"; + bg.style["top"] = "0"; + bg.style["left"] = "0"; + bg.style["height"] = (screen.availHeight + 300) + "px"; + bg.style["width"] = (screen.availWidth + 300) + "px"; + + var warning_html = ""; + if (this.allowCancel) + warning_html += ''; + warning_html += '
    ' + this.langTranslations[this.lang]['title'] + '
    '; + warning_html += '
    ' + this.langTranslations[this.lang]['description'] + '
    '; + warning_html += '
    ' + this.langTranslations[this.lang]['recomendation'] + '
    '; + for (var i = 0; i < this.offeredBrowsers.length; i++) { + warning_html += ' '; + + } + if (this.allowToHide) + warning_html += '
    '; + var warning = document.createElement("div"); + warning.id = "browsercheck_warning"; + warning.style["background"] = "url("+this.images['background']+") no-repeat"; + warning.style["padding"] = "2px"; + warning.style["width"] = "600px"; + warning.style["height"] = "400px"; + warning.style["position"] = "fixed"; + if (BrowserDetect.browser == "IExplorer" && BrowserDetect.version < 7) + warning.style["position"] = "absolute"; + warning.style["z-index"] = "9999"; + warning.style["top"] = ((window.innerHeight || document.body.parentNode.offsetHeight) - 400) / 2 + "px"; + warning.style["left"] = ((window.innerWidth || document.body.parentNode.offsetWidth) - 600) / 2 + "px"; + warning.innerHTML = warning_html; + + this.old_overflow_style = this.getStyle(document.body.parentNode, "overflow") || this.getStyle(document.body, "overflow"); + if (BrowserDetect.browser == "Opera" && this.old_overflow_style == "visible") + this.old_overflow_style = "auto"; + document.body.parentNode.style["overflow"] = "hidden"; + document.body.style["overflow"] = "hidden"; + + document.body.appendChild(bg); + document.body.appendChild(warning); + + if (document.addEventListener) { + document.addEventListener('resize', this.warningPosition, false); + } + else { + document.attachEvent('onresize', this.warningPosition); + } + + }, + warningPosition: function(){ + var warning = document.getElementById('browsercheck_warning'); + warning.style["top"] = ((window.innerHeight || document.body.parentNode.offsetHeight) - 400) / 2 + "px"; + warning.style["left"] = ((window.innerWidth || document.body.parentNode.offsetWidth) - 600) / 2 + "px"; + }, + dontShowAgain: function(){ + var inpDontShowAgain = document.getElementById('browsercheck_dontShowAgain').checked; + var dontShowAgain = this.readCookie('browsercheck_dontShowAgain'); + if (inpDontShowAgain) { + this.createCookie('browsercheck_dontShowAgain', 'on', this.cookiesExpire); + } + else { + this.eraseCookie('browsercheck_dontShowAgain'); + } + }, + cancel: function(){ + var bg = document.getElementById('browsercheck_bg'); + var warning = document.getElementById('browsercheck_warning'); + bg.parentNode.removeChild(bg); + warning.parentNode.removeChild(warning); + document.body.parentNode.style["overflow"] = this.old_overflow_style; + if (BrowserDetect.browser != "IExplorer") + document.body.style["overflow"] = this.old_overflow_style; + document.onresize = this.resize_function; + }, + old_overflow_style: "", + resize_function: null, + allowCancel: false, + allowToHide: false, + cookiesExpire: 1, + images : { + 'background':"img/bg.gif", + 'cancel':"img/cancel.gif" + }, + useBlackList: false, + compatibleBrowsers: { + "Opera": 9.25, + "Firefox": 2, + "IExplorer": 7, + "Safari": 525.17, + "Flock": 1.1, + "Chrome": 1 + }, + unCompatibleBrowsers: { + "IExplorer": 6 + }, + offeredBrowsers: ["Chrome","Firefox", "Flock", "Safari", "IExplorer", "Opera"], + browsersList: { + "Chrome": { + "image": "http://www.goodbyeie6.org.ua/chrome.gif", + "link": "http://www.google.com/chrome/" + }, + "Opera": { + "image": "http://www.goodbyeie6.org.ua/opera.gif", + "link": "http://www.opera.com/products/desktop/" + }, + "Firefox": { + "image": "http://www.goodbyeie6.org.ua/firefox.gif", + "link": "http://www.mozilla-europe.org/" + }, + "IExplorer": { + "image": "http://www.goodbyeie6.org.ua/iexplorer.gif", + "link": "http://www.microsoft.com/windows/internet-explorer/download-ie.aspx" + }, + "Safari": { + "image": "http://www.goodbyeie6.org.ua/safari.gif", + "link": 
"http://www.apple.com/safari/" + }, + "Flock": { + "image": "http://www.goodbyeie6.org.ua/flock.gif", + "link": "http://www.flock.com/" + } + }, + lang: "", + langTranslations: { + "uk": { + "title": "Несумісний браузер", + "description": "Ваш браузер вже застарів, тому в ньому немає всіх необхідних функцій для коректної роботи веб-сайтів. Сучасні веб-сайти створюються, щоб бути максимально зручними та максимально ефективними для людини, а разом із удосконаленням сайтів покращуються браузери. Крім цього, з розвитком інтернет-комерції, зростає кількість зловмисників та хакерських атак; використання найновіших версій браузерів - хороший спосіб вберегти свій комп'ютер.", + "recomendation": "Ми рекомендуємо використовувати останню версію одного із наступних браузерів:", + "cancel": "Закрити попередження", + "dontShowAgain": "Не показувати це попередження наступного разу", + "Flock": "Браузер Flock спеціалізований для користувачів різноманітних соціальних мереж. \nВін оснований на тому ж двигуні що й Firefox, тому демонструє таку ж стабільність та корекність роботи.", + "Firefox": "На сьогоднішній день найпопулярніший браузер у світі. \nЗагальне число користувачів браузера Firefox становить 40%.", + "IExplorer": "Браузер Internet Explorer від компанії Microsoft з 7-ї версії вийшов на новий рівень. \nПроте все ж поступається за коректністю роботи іншим браузерам.", + "Safari": "Популярний браузер від компанії Apple. \nЗ версії 3.1 демонструє достатню стабільність, за що й потрапив до цього списку.", + "Opera": "Браузер Opera користується популярністю в Європі, але великі компанії досі його ігнорують. \nOpera має низку недоліків, проте стабільно удосконалюється.", + "Chrome": "Браузер Chrome - молодий браузер створений компанією Google. \nРозробники приділили особливу увагу зручності браузера, і разом з тим він ні скільки не поступається за коректністю роботи." + }, + "ru": { + "title": "Несовместимый браузер", + "description": "Ваш браузер уже устарел, потому в нем нет всех необходимых функций для корректной работы веб-сайтов. Современные веб-сайты создаются, чтобы быть максимально удобными и максимально эффективными для человека, а вместе с усовершенствованием сайтов улучшаются браузеры. Кроме этого, с развитием интернет-комерции, растет количество злоумышленников и хакерских атак; использование новейших версий браузеров - хороший способ уберечь свой компьютер.", + "recomendation": "Мы рекомендуем использовать последнюю версию одного из следующих браузеров:", + "cancel": "Закрыть предупреждение", + "dontShowAgain": "Не показывать это предупреждение вновь", + "Flock": "Браузер Flock специализирован для пользователей разнообразных социальных сетей. \nОн основан на том же движке что и Firefox, потому демонстрирует такую же стабильность и коректность работы.", + "Firefox": "На сегодняшний день самый популярный браузер в мире. \nОбщее число пользователей браузера Firefox составляет 40%.", + "IExplorer": "Браузер Internet Explorer от компании Microsoft после 7-и версии вышел на новый уровень. \nОднако все же уступает за корректностью работы другим браузерам.", + "Safari": "Популярный браузер от компании Apple. \nПосле версии 3.1 демонстрирует достаточную стабильность, за что и попал к этому списку.", + "Opera": "Браузер Opera пользуется популярностью в Европе, но большие компании до сих пор его игнорируют. \nOpera имеет ряд недостатков, однако стабильно совершенствуется.", + "Chrome": "Браузер Chrome - молодой браузер созданный компанией Google. 
\nРазработчики уделили особое внимание удобству браузера, и вместе с тем он ни сколько не уступает по коректнистю работы." + }, + "en": { + "title": "Desteklenmeyen Tarayıcı!", + "description": "Tarayıcınız web sayfamız tarafından artık desteklenmiyor. Bu da demek oluyor ki sayfamızı verimli olarak kullanamayacaksınız. Lütfen aşağıdaki tarayıcılardan birini kurun.", + "recomendation": "", + "cancel": "Bu uyarıyı kapat", + "dontShowAgain": "Bu uyarıyı tekrar gösterme", + "Firefox": "", + "Flock": "", + "IExplorer": "", + "Safari": "", + "Opera": "", + "Chrome" : "" + } + } +} \ No newline at end of file diff --git a/DJAGEN/yeni_tasarim/img/1276242580_arrow_large_up - Kopya.png b/DJAGEN/yeni_tasarim/img/1276242580_arrow_large_up - Kopya.png new file mode 100755 index 0000000..3736aa9 Binary files /dev/null and b/DJAGEN/yeni_tasarim/img/1276242580_arrow_large_up - Kopya.png differ diff --git a/DJAGEN/yeni_tasarim/img/1276242580_arrow_large_up.png b/DJAGEN/yeni_tasarim/img/1276242580_arrow_large_up.png new file mode 100755 index 0000000..2524ad3 Binary files /dev/null and b/DJAGEN/yeni_tasarim/img/1276242580_arrow_large_up.png differ diff --git a/DJAGEN/yeni_tasarim/img/260px-HD_Waldrapp-150x150.jpg b/DJAGEN/yeni_tasarim/img/260px-HD_Waldrapp-150x150.jpg new file mode 100755 index 0000000..3d7c798 Binary files /dev/null and b/DJAGEN/yeni_tasarim/img/260px-HD_Waldrapp-150x150.jpg differ diff --git a/DJAGEN/yeni_tasarim/img/Newsfeed-Atom-icon (1).png b/DJAGEN/yeni_tasarim/img/Newsfeed-Atom-icon (1).png new file mode 100755 index 0000000..13541cf Binary files /dev/null and b/DJAGEN/yeni_tasarim/img/Newsfeed-Atom-icon (1).png differ diff --git a/DJAGEN/yeni_tasarim/img/Newsfeed-RSS-icon (1).png b/DJAGEN/yeni_tasarim/img/Newsfeed-RSS-icon (1).png new file mode 100755 index 0000000..176ec7d Binary files /dev/null and b/DJAGEN/yeni_tasarim/img/Newsfeed-RSS-icon (1).png differ diff --git a/DJAGEN/yeni_tasarim/img/bg.gif b/DJAGEN/yeni_tasarim/img/bg.gif new file mode 100755 index 0000000..e834316 Binary files /dev/null and b/DJAGEN/yeni_tasarim/img/bg.gif differ diff --git a/DJAGEN/yeni_tasarim/img/bg_slice - Kopya.png b/DJAGEN/yeni_tasarim/img/bg_slice - Kopya.png new file mode 100755 index 0000000..909f558 Binary files /dev/null and b/DJAGEN/yeni_tasarim/img/bg_slice - Kopya.png differ diff --git a/DJAGEN/yeni_tasarim/img/bg_slice.png b/DJAGEN/yeni_tasarim/img/bg_slice.png new file mode 100755 index 0000000..2632558 Binary files /dev/null and b/DJAGEN/yeni_tasarim/img/bg_slice.png differ diff --git a/DJAGEN/yeni_tasarim/img/bg_slice_tmp8637 b/DJAGEN/yeni_tasarim/img/bg_slice_tmp8637 new file mode 100755 index 0000000..e69de29 diff --git a/DJAGEN/yeni_tasarim/img/bullet.png b/DJAGEN/yeni_tasarim/img/bullet.png new file mode 100755 index 0000000..5a8dbc2 Binary files /dev/null and b/DJAGEN/yeni_tasarim/img/bullet.png differ diff --git a/DJAGEN/yeni_tasarim/img/cancel.gif b/DJAGEN/yeni_tasarim/img/cancel.gif new file mode 100755 index 0000000..f226a87 Binary files /dev/null and b/DJAGEN/yeni_tasarim/img/cancel.gif differ diff --git a/DJAGEN/yeni_tasarim/img/footer_bg_slice.png b/DJAGEN/yeni_tasarim/img/footer_bg_slice.png new file mode 100755 index 0000000..d3dac3b Binary files /dev/null and b/DJAGEN/yeni_tasarim/img/footer_bg_slice.png differ diff --git a/DJAGEN/yeni_tasarim/img/footer_bg_slice_tmp22100 b/DJAGEN/yeni_tasarim/img/footer_bg_slice_tmp22100 new file mode 100755 index 0000000..e69de29 diff --git a/DJAGEN/yeni_tasarim/img/footer_bg_slice_tmp5258 
b/DJAGEN/yeni_tasarim/img/footer_bg_slice_tmp5258 new file mode 100755 index 0000000..e69de29 diff --git a/DJAGEN/yeni_tasarim/img/footer_bg_slice_tmp5509 b/DJAGEN/yeni_tasarim/img/footer_bg_slice_tmp5509 new file mode 100755 index 0000000..e69de29 diff --git a/DJAGEN/yeni_tasarim/img/footer_bg_slice_tmp7985 b/DJAGEN/yeni_tasarim/img/footer_bg_slice_tmp7985 new file mode 100755 index 0000000..e69de29 diff --git a/DJAGEN/yeni_tasarim/img/hdr-planet.jpg b/DJAGEN/yeni_tasarim/img/hdr-planet.jpg new file mode 100755 index 0000000..5224ed3 Binary files /dev/null and b/DJAGEN/yeni_tasarim/img/hdr-planet.jpg differ diff --git a/DJAGEN/yeni_tasarim/img/logo.png b/DJAGEN/yeni_tasarim/img/logo.png new file mode 100755 index 0000000..d549477 Binary files /dev/null and b/DJAGEN/yeni_tasarim/img/logo.png differ diff --git a/DJAGEN/yeni_tasarim/img/necdetyucel.png b/DJAGEN/yeni_tasarim/img/necdetyucel.png new file mode 100755 index 0000000..daf9772 Binary files /dev/null and b/DJAGEN/yeni_tasarim/img/necdetyucel.png differ diff --git a/DJAGEN/yeni_tasarim/img/osmanabi.png b/DJAGEN/yeni_tasarim/img/osmanabi.png new file mode 100755 index 0000000..030ce0c Binary files /dev/null and b/DJAGEN/yeni_tasarim/img/osmanabi.png differ diff --git a/DJAGEN/yeni_tasarim/img/sites-bg.png b/DJAGEN/yeni_tasarim/img/sites-bg.png new file mode 100755 index 0000000..8840fe2 Binary files /dev/null and b/DJAGEN/yeni_tasarim/img/sites-bg.png differ diff --git a/DJAGEN/yeni_tasarim/img/sites-sp.png b/DJAGEN/yeni_tasarim/img/sites-sp.png new file mode 100755 index 0000000..bd865e8 Binary files /dev/null and b/DJAGEN/yeni_tasarim/img/sites-sp.png differ diff --git a/DJAGEN/yeni_tasarim/img/twicet_alternate_bg.png b/DJAGEN/yeni_tasarim/img/twicet_alternate_bg.png new file mode 100755 index 0000000..bfce7ad Binary files /dev/null and b/DJAGEN/yeni_tasarim/img/twicet_alternate_bg.png differ diff --git a/DJAGEN/yeni_tasarim/index.html b/DJAGEN/yeni_tasarim/index.html new file mode 100755 index 0000000..fd9b12f --- /dev/null +++ b/DJAGEN/yeni_tasarim/index.html @@ -0,0 +1,962 @@ + + + + + Linux Gezegeni + + + + + + + + + +
    +
    + RSS + Atom +
    +
    + +
    + + + +

    Gezegen her 10 dakikada bir yenilenir. Son güncelleme: 1.5.2010 15:30

    + +
    + + + + + + + + +

    30 Nisan 2010

    + +
    + + +

    44. eniXma Hazır!

    +

    + Yazar: Pardus-Linux.org + Tarih: 30 Nisan 2010 20:01 +

    +
    +

    Özgür Yazılımlar kullanılarak hazırlanan Linux, BSD ve Özgür Yazılım Dergisi eniXma’nın 44. sayısı hazır.

    +

    Derginin bu sayısındaki içerik şu şekilde :

    +

    Deluge – I
    +Yetenekli bir bittorrent istemcisi

    +

    Quat
    +İleri matematiğin eğlenceli yanı.

    +

    Awk/Gawk – VI
    +Diziler

    +

    PXE ile Slackware Kurulumu
    +PXEBoot ile ağ üzerinden kurulum.

    +

    LinCity-NG
    +Şehir kurup yönetin.

    +

    TuxMathScrabble
    +Matematik ile bulmaca.

    +

    Djl Oyun Yöneticisi
    +Linux’a kolay yoldan oyun kurup oynayın.

    +

    BSD – XVII
    +mtree ile sistem bütünlüğünün kontrolu.

    +

    Derginin bu sayısı ve diğer tüm sayılarını http://enixma.org adresinden edinebilirsiniz.

    + + + +Share: + + + Digg + del.icio.us + Facebook + Google Bookmarks + FriendFeed + LinkedIn + Live + MySpace + PDF + Slashdot + StumbleUpon + Twitter + + +

    +
    + + + +
    + +
    + + + + + + + + +
    + + +

    Turns out our e-mail addresses were sitting on the counter waiting for customers

    +

    + Author: Ali Erkan İMREK + Date: 30 April 2010 16:29 +

    +
    +
    Today I received an advertising e-mail from a shopping site; it said cancel your membership to get off the list, and so on. I had never visited that site, which had clearly just opened. I told them I was not a member of their site and asked where they had obtained my e-mail address. I do know that e-mail trolls harvest addresses off the internet by various methods and sell them, so I was assuming they had obtained it some such way.

    In the reply, they wrote that they had obtained it through an agreement with another site I am a member of (naming the site), and that I had been removed from the list. I don't know whether this is legal or not, but putting personal information up for sale as if it were ordinary data, and using information obtained this way, is, however you look at it, very ugly advertising for both companies.




    +
    + + + +
    + + + + + + + + + + +
    + + +

    Yeditepe University, Free Software and Linux Day, 3 May

    +

    + Author: LKD Seminar Announcements + Date: 30 April 2010 12:08 +

    +
    +

    With the cooperation of the Yeditepe University Computer Club, Pardus and the LKD Seminar Working Group;

    +

    Monday, 3 May 2010,

    +

    11.00   “Pardus and How Does One Become a Pardus Developer?” – Gökmen Göksel, Gökçen Eraslan
    +14.00   “Why Python?” – Gökmen Göksel, Gökçen Eraslan
    +16.00   “Why Would I Write Code for Free?” – Pınar Yanardağ

    +

    Venue:  Yeditepe University, Faculty of Engineering and Architecture, Amphitheater B-311

    +

    ozguryazilimgunu

    +
    + + + +
    + + + + + + + + + + +
    + + +

    Logging with Python…

    +

    + Author: Özgür Kuru + Date: 30 April 2010 09:09 +

    +
    +

    You may need your Python application to produce various log messages and to keep them in a file.

    +

    You can use either the syslog or the logging module for this job. I will show an example using the logging module.

    +

    To use the “logging” module in your Python application, you need to import it.

    +

    A simple example of how the logging module is used:

    + +
    # we import the logging module
    +import logging 
    + 
    +# we create a logger object named logger
    +logger = logging.getLogger() 
    + 
    +# we set the minimum log level for our logger
    +logger.setLevel(logging.INFO) 
    + 
    +# we create a handler that will use the /home/ozgur/deneme.log file
    +handler = logging.FileHandler('/home/ozgur/deneme.log') 
    + 
    +# we add our handler to the logger
    +logger.addHandler(handler)
    + 
    +# example error message
    +logger.error("error message")
    + 
    +# example info message
    +logger.info("info message")
    + 
    +# example warning message
    +logger.warning("warning message")
    + +

    With that we have done some simple logging. When you run the script and look at the contents of the log file you specified:

    +

    +error message
    +info message
    +warning message +

    +

    you will see these lines have been written.

    +

    And so we have put together a simple logging facility…
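    A note the post leaves implicit: getLogger() with no arguments returns the root logger, and since no Formatter is attached to the FileHandler, only the bare message text lands in the file. If you also want a timestamp and the level name on each line, you can attach a Formatter to the handler. A minimal sketch (the format string is illustrative, not from the original post):

    import logging

    logger = logging.getLogger()
    logger.setLevel(logging.INFO)

    # the same FileHandler as above, but with a Formatter attached
    handler = logging.FileHandler('/home/ozgur/deneme.log')
    handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    logger.addHandler(handler)

    logger.info("info message")  # written as e.g. "2010-04-30 09:09:00,123 INFO info message"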

    +
    + + + +
    + + + + + + + + + + +
    + + +

    Fresh Ubuntu 10.04 Vuzuh Vaşak (Lucid Lynx) Impressions

    +

    + Author: Can İnce + Date: 30 April 2010 04:37 +

    +
    + Ubuntu 10.04 was released yesterday, and having suffered no small amount under Karmic for six months, I started the upgrade the moment I saw the stable release in the repo, before it had even been officially announced on the web site. +By saying “Vuzuh Vaşak” (an old-fashioned Turkish rendering of “Lucid Lynx”) I suppose I made it look as if I had upgraded to something around version 16.10. + +The “close”, “minimize” and “send to panel” icons on windows have moved to the left. It looked good to me, but let me note right away: to get back to the familiar arrangement on the right [...] +
    + + + +
    + + + + + + + + +

    29 Nisan 2010

    + +
    + + +

    Giving Wine Applications a More Native Look

    +

    + Author: Anıl Özbek + Date: 29 April 2010 23:23 +

    +
    +
    Wine is a wonderful project. It gives us a rare chance to run software, games and anything else you can think of that was written for another operating system. It has its detractors too, which I find curious. Our topic, though, is neither how wonderful Wine is nor why some people dislike it, but Wine's most important shortcoming: Wine applications do not look very good, because Wine uses neither KDE's nor GNOME's color schemes and font preferences. This is really bad; even if you use only one or two such applications, it is hard to bear. You can use Windows themes with Wine, but that slows Wine down considerably. Besides, where would you find one that matches the KDE or GNOME theme you use? Still, it is not as if nothing can be done.

    Although I will only describe how to do it for KDE, you can apply more or less the same steps to GNOME after minor changes. It can probably be adapted to other desktop environments as well. The article continues at full speed with what needs to be done for KDE.

    Open System Settings. Under the Look &amp; Feel section, choose Appearance. From Appearance, Colors. Then, in the Color Settings section, the Colors tab ...
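    The post is cut off at this point in this capture. For the general idea only, a hedged sketch of one common approach (not necessarily the author's exact steps): Wine reads its widget colors from the registry key Control Panel\Colors, where each value is a space-separated decimal RGB triplet, so you can generate a .reg file from your desktop theme's colors and import it with wine regedit. The color values below are placeholders:

    # sketch: write a .reg file that sets Wine's widget colors,
    # then import it with: wine regedit wine-colors.reg
    colors = {
        "ButtonFace": "239 235 231",   # placeholder: your theme's window background
        "ButtonText": "0 0 0",         # placeholder: your theme's text color
        "Window": "255 255 255",
        "WindowText": "0 0 0",
    }
    reg = open("wine-colors.reg", "w")
    reg.write("REGEDIT4\n\n[HKEY_CURRENT_USER\\Control Panel\\Colors]\n")
    for name, rgb in colors.items():
        reg.write('"%s"="%s"\n' % (name, rgb))
    reg.close()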
    +
    + + + +
    + +
    + + + + + + + + +
    + + +

    Mounting FTP Directories on Linux Systems

    +

    + Author: Cagri Ersen + Date: 29 April 2010 16:57 +

    +
    + + + + +
    On *nix systems there are a few packages that offer filesystem support for direct access to the space on any FTP server. One of them, whose usage I will cover in this article, is CurlFtpFS, a curl- and FUSE-based FTP filesystem.
    +

    Mounting an FTP area into the system is a nice, work-speeding method, since it lets you reach the area quickly without using any FTP client. For that reason, in the rest of this article I will talk about using CurlFtpFS to mount FTP areas as if they were local disks, and about having them remounted automatically at boot.
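    The actual commands are behind the "read more" link below, but as a rough illustration of the idea (a sketch, not the author's exact steps): a CurlFtpFS mount is a single command, and it can just as easily be driven from Python. The host, credentials and mount point here are hypothetical:

    import subprocess

    def mount_ftp(url, mountpoint):
        # curlftpfs <ftp-url> <mountpoint>; requires the curlftpfs package and FUSE
        subprocess.check_call(["curlftpfs", url, mountpoint])

    def umount_ftp(mountpoint):
        # fusermount -u unmounts a FUSE filesystem as a regular user
        subprocess.check_call(["fusermount", "-u", mountpoint])

    mount_ftp("ftp://user:password@ftp.example.com/", "/mnt/ftp")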

    + + + +


    Read the rest: Mounting FTP Directories on Linux Systems

    +
    +

    Published by Cagri Ersen at Syslogs. | Permalink | Tags: , , +

    +
    + + + +
    + + + + + + + + + + + + + + + + + + + + + +
    + + +

    Minimalist Approaches

    +

    + Author: Kaya Oğuz + Date: 29 April 2010 10:26 +

    +
    +

    Lately, looking at some screenshots and at some friends' computers, I see a world of buttons, especially in Firefox. One inevitably feels a bit lost among all those buttons.

    +

    The other day I realized I hardly wear out the buttons on the browser toolbar, particularly the stop and home buttons. Then I thought about it: I don't use the back and forward buttons either; I do those with mouse gestures (the FireGestures extension). Suddenly all those buttons looked unnecessary. All that remained were the address bar and the search field, plus the Gmail and Google Bookmarks buttons that come with Google Toolbar. So I moved them all up next to the menu.

    +

    title

    +

    As you can see in the screenshot, I now have a beautifully plain browser.

    +
    + + + +
    + + + + + + + + +

    27 April 2010

    + +
    + + +

    GNU/Linux (Still) Sucks

    +

    + Author: Anıl Özbek + Date: 27 April 2010 21:19 +

    +
    +
    You probably remember "Linux Sucks!", which appeared on the planet recently. Bryan Lunduke recently gave this 2009 talk again and published the slides and the video of the talk on his blog. If you haven't watched it yet, take a look. You can also get the Turkish translation of the slides from here. The topic also got discussed a little over there.

    +
    + + + +
    + +
    + + + + + + + + +
    + + +

    Ubuntu 10.04 Is Coming Soon

    +

    + Author: Levent Yıldırım + Date: 27 April 2010 09:44 +

    +
    +

    +

    Ubuntu 10.04 LTS (Lucid Lynx) is being released on Thursday, 29 April, as planned. Although it has not been officially announced yet, it has already started to appear on the mirrors.

    +

    What's new in this release:

    +

    GNOME 2.30

    +

    Version 2.6.32-21.32 of the Linux kernel, based on 2.6.32.11
    +KDE 4.4
    +Removal of HAL
    +A new version of the likewise-open package
    +The nouveau driver is now the default open source driver for Nvidia graphics cards
    +Improved support for Nvidia's proprietary drivers
    +The Gwibber social networking application

    +

    A faster and more visual boot
    +New themes
    +Ubuntu One file synchronization improvements
    +The Ubuntu One Music Store
    +Ubuntu Enterprise Cloud improvements
    +The Ubuntu One Music Store integrated with Rhythmbox

    +


    +
    +

    +
    + + + +
    + + + + + + + + +

    26 April 2010

    + +
    + + +

    The DeuX E-Magazine

    +

    + Author: Necati Demir + Date: 26 April 2010 06:36 +

    +
    +

    Computer Engineering students at Dokuz Eylül University had been wanting to put out an e-magazine for some time, and they had asked me to get involved in the organization. I contributed an article to the first issue, and I told them I would help organize the issues after this one.

    Here it is: DeuX, the magazine of the Dokuz Eylül University Computer Engineering Society - http://web.deu.edu.tr/bt/deux/

    Although its publication schedule is not yet settled, and the number of articles is relatively small, I think it will find its rhythm in time.

    +
    + + + +
    + +
    + + + + + + +

    24 April 2010

    + +
    + + +

    After the International Northern Cyprus Free Software Conference

    +

    + Author: Necdet Yücel + Date: 24 April 2010 14:27 +

    +
    + I want to write a few things following the conference held in Northern Cyprus on 13 April. Let me start with my notes on Near East University:
    * The university's physical facilities are very good.
    * It is a roughly 20-year-old university with close to twenty thousand students.
    * It is far from the city center, but inside a huge campus.
    * They have supercomputers ;)
    * A magnificent hospital is being built for the medical school.
    * It admits students with very low exam scores.

    A few things about the TRNC:
    * Life flows slowly; nobody is in a hurry.
    * The water running from the taps is salty.
    * Traffic flows the other way round; the steering wheels, the doors, everything is on the opposite side. Getting used to it takes time.
    * Public transport is quite limited.
    * Petrol is 3.80 TL in Turkey but 1.90 TL in the TRNC.
    * I saw with my own eyes that the new Astra costs €12,000 ;)
    * If I say that a 70 cl bottle of Yeni Rakı is 12.70 TL, that should make clear how cheap drink is. I must say I was very surprised to see that foreign drinks in particular cost a third, even a quarter, of what they do back home.
    * Since almost no country recognizes the TRNC, there are no such things as copyright or trademark rights. Films just released in theaters being shown on television is described as an everyday occurrence.
    * They have an amusing way of speaking. Whatever they say, they make you smile.
    * I was told that some things are expensive ... +
    + + + +
    + +
    + + + + + + +

    23 April 2010

    + +
    + + +

    The Law 5651 Workshop: The Kartepe Principles

    +

    + Author: Mustafa Akgül + Date: 23 April 2010 09:56 +

    +
    +

    Law no. 5651, which provides the legal infrastructure for Internet bans in our country, was rushed through in May 2007 as if rescuing goods from a fire, and went into effect at the end of November 2007. Led by the Ankara Bar Association, various civil society organizations, industry organizations, and judges from the justice community came together twice in a workshop setting to seek solutions to the problems that have emerged. The second one took place on 20-22 April at the Kartepe Green Park Hotel in Kocaeli. As its closing declaration, the workshop published a list of principles.

    +

    THE KARTEPE CRITERIA
    +

    +20-22 April 2010, Kartepe / Kocaeli

    +

    The majority of this workshop's participants, recalling that the Internet has opened entirely new horizons for humanity; observing that the Internet is an effective vehicle for the free circulation of information and ideas, and that anonymity and privacy are values to be protected in the use of this vehicle; also taking into account that the Internet can be misused to harm individual rights and freedoms; and underlining that restrictions concerning the Internet are possible only under laws consistent with universal law and by a judge's decision, without touching the essence of fundamental rights and freedoms;

    +

    have considered it their duty to announce to the public the fundamental principles listed below.

    +

    PRINCIPLE 1 – Web 2.0, which plays a major role in the spread of ideas and opinions on the Internet ...

    +
    + + + +
    + +
    + + + + + + + + +
    + + +

    Linux Sucks!

    +

    + Author: Onur Yalazı + Date: 23 April 2010 08:56 +

    +
    +

    You may be surprised to hear this sentence coming from me. You may even have thought, "Is someone joking? Did his accounts get stolen?" You would naturally be right to think so, because this sentence is not mine. Nor am I going to stand behind it.

    +

    The sentence belongs to Bryan Lunduke. It is the title of a talk he gave in 2009. I only just came across it, or if I had come across it before, it hadn't caught my attention :) In this talk, Lunduke opens up for discussion why Linux has not succeeded on the desktop. And through that, he wants Linux to get better.

    +

    Watch the talk if you like.

    +

    +

    Bryan Lunduke http://lunduke.com/?p=429

    +
    + + + +
    + + + + + + + + +

    22 April 2010

    + +
    + + +

    Habertux.com has moved to Likya

    +

    + Author: LKD YK + Date: 22 April 2010 06:25 +

    +
    +

    habertux.com, the web site where free software and Linux news and articles are published, has started serving from Likya, the association's community server, after a short migration process.

    +
    + + + +
    + +
    + + + + + + +

    21 April 2010

    + +
    + + +

    New list: linux-mobil

    +

    + Author: LKD YK + Date: 21 April 2010 06:53 +

    +
    +

    Last week a new one was added to the association's technical mailing lists. linux-mobil is a mailing list for sharing information and corresponding about Linux-based mobile operating systems, such as Android, Moblin, Maemo and the netbook distributions, and the devices that run them.

    +

    To subscribe to the list:
    +http://liste.linux.org.tr/mailman/listinfo/linux-mobil

    +

    For information about all the lists:
    +http://liste.linux.org.tr

    +
    + + + +
    + +
    + + + + + + + + +
    + + +

    Would you like to become a Root author?

    +

    + Author: Eren Kovancı + Date: 21 April 2010 02:36 +

    +
    + Root, the Linux Mint Türkiye e-magazine, is looking for its authors!

    The end of the road we set out on about a month and a half ago is coming into view. Root is getting ready to put out its first issue soon. If you would like your articles to be published in the magazine, or want to join the magazine's crew, you can visit this page. +
    + + + +
    + + + + + + + + +

    20 April 2010

    + +
    + + +

    The Turkish Internet Is Coming of Age!

    +

    + Author: Mustafa Akgül + Date: 20 April 2010 15:26 +

    +
    +

    12.04.2010, İstanbul Chamber of Commerce Assembly Hall

    +

    Today the Turkish Internet turns 18. We, the IT civil society organizations, are celebrating this as Internet Week across the whole country. Since 1998 we, the IT civil society organizations, have celebrated the two weeks around 12 April together with the Internet Council. For many years we celebrated in Ankara. Later we celebrated the birthday in Samsun, Diyarbakır and Manisa. Today we are celebrating in Istanbul, hosted by the İTO, the largest professional organization. On behalf of the IT NGO Platform, we thank the İTO for its gracious hospitality. I hope this support makes a positive contribution to Internet use among the İTO's SME members, and gives it a real boost. We celebrate this period as an IT and Internet festival, and during it we want every citizen of Turkey to talk about the Internet, think about it, use it, and explore how they can benefit from it and develop their work and themselves. During this Week, we wish the emphasis to be on the Internet's importance, its potential and its positive sides.

    +

    We regard the Internet as a development as important as the Industrial Revolution. The Industrial Revolution multiplied human muscle power and enabled its effective use. The Internet revolution, in turn, encompasses human brain power: it enables intellectual products to be produced, reproduced, shared and used ...

    +
    + + + +
    + +
    + + + + + + + + +
    + + +

    An end to the username/password/access headache on the Wordpress sites

    +

    + Author: LKD Web Working Group + Date: 20 April 2010 13:54 +

    +
    +

    Wordpress installations are used on the association's various web sites. With every new installation, new users and passwords used to be created.

    +

    As the number of sites grew, so did the questions (and problems): who can access which site, "did I have an account on this site, and if so what was the username; if not, how long will it take to get one opened; what password did I set", and so on.

    +

    After the association's web areas and the SVN and Trac services, the Wordpress installations have now also started using the usernames/passwords from the LKD membership software. A small addition was made to the database so that the members who deal with the web sites can be identified (authorization). Burak Usgurlu adapted the password retrieval interface accordingly, while Tuğrul Gürkaynak made the additions to the administration panel. Installing the plugin on the Wordpress installations, configuring it and updating the existing documentation fell to Doruk Fişek.

    +

    Now, the moment a member is granted web page editing rights in the membership software, they can log in to the administration interfaces of all the Wordpress sites with their LKD username/password and edit pages (and vice versa). The member's username, password, name/surname and e-mail information comes automatically from the membership software.
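    For the curious, a hedged sketch of the general pattern described above (the real implementation is a Wordpress plugin talking to the LKD member database; the table and column names here are invented for illustration):

    import hashlib
    import sqlite3

    # a toy member table standing in for the real membership database
    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE members (username TEXT, password_hash TEXT, web_edit INTEGER)")
    conn.execute("INSERT INTO members VALUES (?, ?, 1)",
                 ("uye", hashlib.sha1("parola".encode("utf-8")).hexdigest()))

    def can_edit_web(username, password):
        # authenticate against the member table, then check the web-edit flag
        row = conn.execute("SELECT password_hash, web_edit FROM members WHERE username = ?",
                           (username,)).fetchone()
        return (row is not None
                and hashlib.sha1(password.encode("utf-8")).hexdigest() == row[0]
                and bool(row[1]))

    print(can_edit_web("uye", "parola"))   # True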

    +
    + + + +
    + + + + + + + + +

    18 April 2010

    + +
    + + +

    The 64-bit work will now be done on faster computers

    +

    + Author: Necdet Yücel + Date: 18 April 2010 00:57 +

    +
    +

    The 10 laptops sent by Pardus for us to use in the 64-bit work have arrived. The first thing we did was install 64-bit Pardus on them.

    Of course, these computers will not be used only for 64-bit work in the period ahead. We will announce projects useful to all of us once they reach sufficient maturity. Let me also say that the work will pick up speed again after midterms are over, and that the new preview release is very close. +
    + + + +
    + +
    + + + + + + +

    17 April 2010

    + +
    + + +

    The king is dead, long live the king!

    +

    + Author: Necdet Yücel + Date: 17 April 2010 14:36 +

    +
    + Stefano Zacchiroli has become the new leader of Debian, the most comprehensive distribution. Although Zack is someone I like a lot too, I wanted Margarita Manterola, the first female candidate, to win this election. Marga winning could have brought an important change for Debian. Oh well, Zack is good too ;) +
    + + + +
    + +
    + + + + + + +

    15 April 2010

    + +
    + + +

    Two more /8s are gone

    +

    + Author: Necdet Yücel + Date: 15 April 2010 23:36 +

    +
    +

    Last week the 14/8 and 223/8 address blocks were sold off as well, and the remaining portion of the IPv4 address space dropped below 8%. Do I need to remind you that there are more IPv6 addresses than you could ever want? +
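    (For scale, my arithmetic rather than the post's: each /8 block is 2^24 addresses, 1/256 of the whole IPv4 space, about 0.39%; so "below 8%" means fewer than roughly 20 of the 256 /8 blocks remained unallocated.)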
    + + + +
    + +
    + + + + + + + + +
    + + +

    The 4th ULAKNET Workshop and Training

    +

    + Author: Necdet Yücel + Date: 15 April 2010 17:29 +

    +
    +
    The fourth edition of ULAKBİM's event bringing together the university IT departments will be held at Süleyman Demirel University in Isparta on 23-26 May. As every year, we will be at the Workshop again this year; do drop by. +
    + + + +
    + + + + + + + + + + +
    + + +

    Teamviewer for Linux

    +

    + Author: Ümit Yaşar + Date: 15 April 2010 17:24 +

    +
    +

    +

    For those who don't know, TeamViewer is a program designed for connecting to remote desktop computers and carrying out all kinds of operations on them. The software, which previously had only a Windows version, can now be used on Linux and Mac as well.

    +

    Use it for good purposes, and at your own risk

    +

    500x_teamviewer_linux

    +

    TeamViewer Download (Linux)

    + + +

    Related Posts:

    1. An Antivirus Program for Linux!
    2. +
    3. Run Linux on Windows
    4. +
    5. Fix Windows Problems with Fix It!
    6. +

    +
    + + + +
    + + + + + + + + + +
    + + + + + + + + + +
    + + + \ No newline at end of file diff --git a/DJAGEN/yeni_tasarim/index2.html b/DJAGEN/yeni_tasarim/index2.html new file mode 100755 index 0000000..46fbf7b --- /dev/null +++ b/DJAGEN/yeni_tasarim/index2.html @@ -0,0 +1,89 @@ + + + + + Untitled Document + + + + + + + + +
    +
    + RSS + Atom +
    +
    + +
    + +

    The Planet is refreshed every 10 minutes. Last update: 10 Jun 2010 @ 01:21 PM

    +
    +

    June 10, 2010

    +
    + +

    Abant Is Slipping Away. All of You, I Swear...

    +

    + Written by: Osman Ballı + June 10, 2010 5.10 GMT +2 +

    +
    +

    Lorem ipsum dolor sit amet, consectetur adipiscing elit. Proin quis massa metus. Integer non urna lectus. Nulla nec lacus metus. Maecenas interdum, massa ac tempor adipiscing, erat erat egestas lorem, rutrum elementum nulla tellus quis arcu. Morbi rutrum est ac erat interdum consequat. Etiam adipiscing, mauris sit amet faucibus ultrices, lacus mi gravida magna, quis suscipit odio enim interdum leo. Aenean non ligula nulla. In urna arcu, eleifend quis feugiat a, lobortis id nibh. Vestibulum tincidunt porttitor sem, vel dignissim elit imperdiet vel. Aliquam pulvinar viverra urna non consequat. Nam pellentesque, dui eu convallis dapibus, nulla libero auctor lorem, in imperdiet massa arcu sed ante. Quisque vestibulum pulvinar nulla, vitae luctus velit egestas hendrerit. Vestibulum mauris purus, posuere in malesuada eu, mollis eu erat. Donec eget erat purus, faucibus mollis lorem. Donec euismod ultrices blandit.

    +
      +
    • This is an
    • +
    • Unordered
    • +
    • List
    • +
    +

    Fusce elit tortor, bibendum ut malesuada ac, elementum et est. Vestibulum sit amet metus urna, id lobortis nisl. Nulla molestie, odio non condimentum gravida, magna neque facilisis velit, eget suscipit orci nunc in metus. Suspendisse sem dui, sagittis ultrices cursus vel, cursus nec nisi. Sed egestas arcu arcu. Proin purus metus, pretium at pellentesque non, condimentum non risus. Sed aliquet nisl sit amet eros elementum vitae blandit felis rhoncus. Proin dolor eros, fringilla eu scelerisque at, consectetur vel purus. Nullam fermentum lorem vitae odio luctus suscipit. Vestibulum consectetur iaculis velit, ut eleifend metus aliquet vitae. Aliquam ornare lacus non diam facilisis vestibulum. Nulla vestibulum suscipit bibendum. Integer purus odio, ullamcorper nec convallis a, vestibulum ac nulla. Vivamus malesuada sagittis magna, elementum imperdiet risus euismod eget. Fusce facilisis diam a turpis pretium quis pulvinar quam tincidunt. Suspendisse potenti. Nulla facilisi.

    +

    Vivamus elit orci, gravida sit amet elementum eget, ultricies quis enim. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Donec elementum, justo vel pretium mattis, magna metus dapibus nulla, at volutpat justo lectus vitae massa. Donec porttitor, neque vel semper ornare, purus mi adipiscing dui, eget porttitor turpis quam sed nisi. Sed volutpat magna non neque commodo fermentum commodo justo scelerisque. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla feugiat, risus bibendum eleifend facilisis, arcu elit vestibulum nunc, sed faucibus ante est vitae sapien. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Etiam sagittis, augue ut blandit euismod, massa odio imperdiet ipsum, at dictum lacus augue et orci. Nullam tempus felis eu est vehicula vel egestas magna tincidunt. Praesent feugiat placerat ligula, vel aliquet turpis consectetur ut. Sed vulputate, massa at feugiat hendrerit, nisl tortor cursus risus, nec pellentesque mauris augue molestie lorem. Nunc massa sapien, dignissim ut faucibus in, rutrum luctus sem. Suspendisse sed dignissim felis. Vestibulum enim risus, blandit congue eleifend eget, porta vel dui. Donec volutpat placerat sollicitudin. Mauris at tortor vitae dolor vulputate scelerisque at sed risus.

    +
    +
    +
    +
    + +

    Abant Is Slipping Away. All of You, I Swear...

    +

    + Written by: Osman Ballı + June 10, 2010 5.10 GMT +2 +

    +
    +

    Lorem ipsum dolor sit amet, consectetur adipiscing elit. Proin quis massa metus. Integer non urna lectus. Nulla nec lacus metus. Maecenas interdum, massa ac tempor adipiscing, erat erat egestas lorem, rutrum elementum nulla tellus quis arcu. Morbi rutrum est ac erat interdum consequat. Etiam adipiscing, mauris sit amet faucibus ultrices, lacus mi gravida magna, quis suscipit odio enim interdum leo. Aenean non ligula nulla. In urna arcu, eleifend quis feugiat a, lobortis id nibh. Vestibulum tincidunt porttitor sem, vel dignissim elit imperdiet vel. Aliquam pulvinar viverra urna non consequat. Nam pellentesque, dui eu convallis dapibus, nulla libero auctor lorem, in imperdiet massa arcu sed ante. Quisque vestibulum pulvinar nulla, vitae luctus velit egestas hendrerit. Vestibulum mauris purus, posuere in malesuada eu, mollis eu erat. Donec eget erat purus, faucibus mollis lorem. Donec euismod ultrices blandit.

    +

    Fusce elit tortor, bibendum ut malesuada ac, elementum et est. Vestibulum sit amet metus urna, id lobortis nisl. Nulla molestie, odio non condimentum gravida, magna neque facilisis velit, eget suscipit orci nunc in metus. Suspendisse sem dui, sagittis ultrices cursus vel, cursus nec nisi. Sed egestas arcu arcu. Proin purus metus, pretium at pellentesque non, condimentum non risus. Sed aliquet nisl sit amet eros elementum vitae blandit felis rhoncus. Proin dolor eros, fringilla eu scelerisque at, consectetur vel purus. Nullam fermentum lorem vitae odio luctus suscipit. Vestibulum consectetur iaculis velit, ut eleifend metus aliquet vitae. Aliquam ornare lacus non diam facilisis vestibulum. Nulla vestibulum suscipit bibendum. Integer purus odio, ullamcorper nec convallis a, vestibulum ac nulla. Vivamus malesuada sagittis magna, elementum imperdiet risus euismod eget. Fusce facilisis diam a turpis pretium quis pulvinar quam tincidunt. Suspendisse potenti. Nulla facilisi.

    +

    Vivamus elit orci, gravida sit amet elementum eget, ultricies quis enim. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Donec elementum, justo vel pretium mattis, magna metus dapibus nulla, at volutpat justo lectus vitae massa. Donec porttitor, neque vel semper ornare, purus mi adipiscing dui, eget porttitor turpis quam sed nisi. Sed volutpat magna non neque commodo fermentum commodo justo scelerisque. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla feugiat, risus bibendum eleifend facilisis, arcu elit vestibulum nunc, sed faucibus ante est vitae sapien. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Etiam sagittis, augue ut blandit euismod, massa odio imperdiet ipsum, at dictum lacus augue et orci. Nullam tempus felis eu est vehicula vel egestas magna tincidunt. Praesent feugiat placerat ligula, vel aliquet turpis consectetur ut. Sed vulputate, massa at feugiat hendrerit, nisl tortor cursus risus, nec pellentesque mauris augue molestie lorem. Nunc massa sapien, dignissim ut faucibus in, rutrum luctus sem. Suspendisse sed dignissim felis. Vestibulum enim risus, blandit congue eleifend eget, porta vel dui. Donec volutpat placerat sollicitudin. Mauris at tortor vitae dolor vulputate scelerisque at sed risus.

    +
    +
    +
    + +
    + + diff --git a/DJAGEN/yeni_tasarim/layout.css b/DJAGEN/yeni_tasarim/layout.css new file mode 100755 index 0000000..bdb3687 --- /dev/null +++ b/DJAGEN/yeni_tasarim/layout.css @@ -0,0 +1,287 @@ +@charset "utf-8"; +/* CSS Document */ + +html, body, div, span, applet, object, iframe, +h1, h2, h3, h4, h5, h6, p, blockquote, pre, +a, abbr, acronym, address, big, cite, code, +del, dfn, em, font, img, ins, kbd, q, s, samp, +small, strike, strong, sub, sup, tt, var, +b, u, i, center, +dl, dt, dd, ol, ul, li, +fieldset, form, label, legend, +table, caption, tbody, tfoot, thead, tr, th, td, +header, nav, section, article, aside, footer +{border: 0; margin: 0; outline: 0; padding: 0; background: transparent; vertical-align: baseline;} + +blockquote, q {quotes: none;} +blockquote:before,blockquote:after,q:before,q:after {content: ''; content: none;} + +header, nav, section, article, aside, footer {display: block;} + +/* Basic styles */ +body {background: #fff; color: #000; font: .75em/1.5em Helvetica, Arial, "Liberation Sans", "Bitstream Vera Sans", sans-serif;} +html>body {font-size: 12px;} + +img {display: inline-block; vertical-align: bottom;} + +h1,h2,h3,h4,h5,h6,strong,b,dt,th {font-weight: 700;} +address,cite,em,i,caption,dfn,var {font-style: italic;} + +h1 {margin: 0 0 0.75em; font-size: 2em;} +h2 {margin: 0 0 1em; font-size: 1.5em;} +h3 {margin: 0 0 1.286em; font-size: 1.167em;} +h4 {margin: 0 0 1.5em; font-size: 1em;} +h5 {margin: 0 0 1.8em; font-size: .834em;} +h6 {margin: 0 0 2em; font-size: .75em;} + +p,ul,ol,dl,blockquote,pre {margin: 0;} + +li ul,li ol {margin: 0;} +ul {list-style:none;} +ol {list-style: outside decimal;} +li {margin: 0;} +dd {padding-left: 1.5em;} +blockquote {padding: 0 1.5em;} + +a {text-decoration: underline;} +a:hover {text-decoration: none;} +abbr,acronym {border-bottom: 1px dotted; cursor: help;} +del {text-decoration: line-through;} +ins {text-decoration: overline;} +sub {font-size: .834em; line-height: 1em; vertical-align: sub;} +sup {font-size: .834em; line-height: 1em; vertical-align: super;} + +tt,code,kbd,samp,pre {font-size: 1em; font-family: "Courier New", Courier, monospace;} + +/* Table styles */ +table {border-collapse: collapse; border-spacing: 0; margin: 0 0 1.5em; border:none;} +caption {text-align: left;} +th, td {padding: .25em .5em;} +tbody td, tbody th {/*border: 1px solid #000;*/} +tfoot {font-style: italic;} + +/* Form styles */ +fieldset {clear: both;} +legend {padding: 0 0 1.286em; font-size: 1.167em; font-weight: 700;} +fieldset fieldset legend {padding: 0 0 1.5em; font-size: 1em;} +* html legend {margin-left: -7px;} +*+html legend {margin-left: -7px;} + +form .field, form .buttons {clear: both; margin: 0 0 1.5em;} +form .field label {display: block;} +form ul.fields li {list-style-type: none; margin: 0;} +form ul.inline li, form ul.inline label {display: inline;} +form ul.inline li {padding: 0 .75em 0 0;} + +input.radio, input.checkbox {vertical-align: top;} +label, button, input.submit, input.image {cursor: pointer;} +* html input.radio, * html input.checkbox {vertical-align: middle;} +*+html input.radio, *+html input.checkbox {vertical-align: middle;} + +textarea {overflow: auto;} +input.text, input.password, textarea, select {margin: 0; font: 1em/1.3 Helvetica, Arial, "Liberation Sans", "Bitstream Vera Sans", sans-serif; vertical-align: baseline;} +input.text, input.password, textarea {border: 1px solid #444; border-bottom-color: #666; border-right-color: #666; padding: 2px;} + +* html button {margin: 0 .34em 0 0;} +*+html button 
{margin: 0 .34em 0 0;} + +form.horizontal .field {padding-left: 150px;} +form.horizontal .field label {display: inline; float: left; width: 140px; margin-left: -150px;} + +/* Useful classes */ +img.left {display: inline; float: left; margin: 0 1.5em .75em 0;} +img.right {display: inline; float: right; margin: 0 0 .75em .75em;} + +ul {color:#FFF; padding:0 10px 0 0;} + +body { + background:url(img/twicet_alternate_bg.png) #000 center top no-repeat; + font-family:'Lucida Grande','Lucida Sans Unicode',Tahoma,Arial,sans-serif; +} + +.wrapper { + width:950px; + margin:0 auto 30px auto; + position:relative; +} + +.icons { + text-align:right; + height:20px; + padding:5px 10px; +} + +.hdr { + height:47px; + background:url(img/hdr-planet.jpg) #9DB8D2 right bottom no-repeat; + position:relative; + border-bottom:1px solid #000; +} + +.logo { + width:64px; + height:54px; + background:url(img/logo.png) no-repeat; + z-index:100; + position:absolute; + top:-2px; + left:10px; +} + +.top { + height:27px; + background:url(img/sites-bg.png) repeat-x; + margin:0 0 20px 0; +} + +.toplist1 li.home { + float:left; +} +.toplist1 li { + float:right; +} + +.toplist1 li a { + display:inline; + color:#FFF; + line-height:27px; + margin:0 0 0 10px; + font-family:"Trebuchet MS", Arial, Helvetica, sans-serif; + font-weight:bold; +} + +.toplist1 a#arsiv { + margin:0 5px 0 0; + padding:0 22px 0 0; + background:url(img/1276242580_arrow_large_up%20-%20Kopya.png) right center no-repeat; +} +.navlist { + display:none; + position:absolute; + z-index:1000; + margin:0 0 0 -400px; +} + +.navlist li { + float:left; + width:120px; + background:#000; + text-align:center; + background:url(img/sites-bg.png) repeat-x; +} + +.navlist li a { + display:block; + margin:0; + line-height:22px; +} + +.navlist li a:hover { + +} + +.contenttop { + background:url(img/bg_slice.png) no-repeat scroll transparent; + height:37px; + width:950px; +} + +.contenttop p { + line-height:37px; + padding:0 0 0 20px; + color:#FFF; + font-family:'Lucida Grande','Lucida Sans Unicode',Tahoma,Arial,sans-serif; +} + +.content { + border-right:1px solid #CCC; + border-left:1px solid #CCC; + background:#FFF; +} + +.date { + padding:20px 0 0 20px; +} + +.separator { + height:10px; + background:#999; + border-top:1px solid #CCC; + border-bottom:1px solid #CCC; + margin:20px 0; + clear:both; +} + +.innercontent {padding:20px;} + +.face { + clear:both; + float:left; + width:80px; + height:80px; +} + +.title { + float:right; + text-align:center; + width:825px; + height:22px; +} + +.title a { + color: #000; +} + +.yazaneden { + float:right; + width:808px; + height:20px; + background:#FFFFDD; + padding:10px; + text-align:center; + margin:0 auto 20px; +} +.blogdate { + margin:0 20px 0 0; +} + +.blogcontent { + clear:both; + margin:0 20px 20px 20px; + font-family:'Lucida Grande','Lucida Sans Unicode',Tahoma,Arial,sans-serif; + font-size:12.5px; +} + +.blogcontent p { + margin:16px 0 0 0; +} + +.blogcontent img { + float:right; + margin:0 0 20px 10px; +} + +.blogcontent ul { + margin:10px 0 10px 20px; + color:#000; +} + +.blogcontent ul li { + padding:0 0 4px 25px; + background:url(img/bullet.png) left center no-repeat; +} + +.footer { + width:951px; + height:66px; + background:url(img/footer_bg_slice.png) scroll no-repeat transparent; + left:-1px; + position:absolute; + padding:0 0 20px 0; +} + +.footer p { + line-height:45px; + color:#FFF; + padding:0 0 0 20px; +} diff --git a/DJAGEN/yeni_tasarim/script.js b/DJAGEN/yeni_tasarim/script.js new file mode 100755 index 
0000000..6210b0f --- /dev/null +++ b/DJAGEN/yeni_tasarim/script.js @@ -0,0 +1,7 @@ +// JavaScript Document +$(document).ready(function(){ + $("#arsiv").click(function(e){ + e.preventDefault(); + $(".navlist").slideToggle('medium'); + }); +}); \ No newline at end of file diff --git a/DJAGEN/yeni_tasarim/slider.js b/DJAGEN/yeni_tasarim/slider.js new file mode 100755 index 0000000..7468937 --- /dev/null +++ b/DJAGEN/yeni_tasarim/slider.js @@ -0,0 +1,5 @@ +$(document).ready(function () { + $('img.menu_class').click(function () { + $('ul.the_menu').slideToggle('medium'); + }); +}); \ No newline at end of file