summary refs log tree commit diff
path: root/tsp
diff options
context:
space:
mode:
authorAleksander Mistewicz <a.mistewicz@samsung.com>2016-06-09 11:44:57 +0200
committerAleksander Mistewicz <a.mistewicz@samsung.com>2016-08-24 10:54:50 +0200
commitf98afdbe644bd48c6b72e3352e698901f923dcfb (patch)
treeb697bed43f6d1c4d36b8c35407e27889db32b9eb /tsp
parent2320429ce2eace8ea847487428dbd2cbc665a7be (diff)
downloadmajor-f98afdbe644bd48c6b72e3352e698901f923dcfb.tar.gz
major-f98afdbe644bd48c6b72e3352e698901f923dcfb.tar.bz2
major-f98afdbe644bd48c6b72e3352e698901f923dcfb.zip
Modify tsp behaviour to comply with OBS changes
Use filenames that won't confuse future developers.

Change-Id: I3c0928045ab169b20333c94f762d6be64f58a17c
Signed-off-by: Aleksander Mistewicz <a.mistewicz@samsung.com>
Diffstat (limited to 'tsp')
-rwxr-xr-xtsp/jobs/trigger_downloads.sh4
-rwxr-xr-xtsp/jobs/watcher.sh2
-rwxr-xr-xtsp/scripts/crawler.py44
3 files changed, 40 insertions, 10 deletions
diff --git a/tsp/jobs/trigger_downloads.sh b/tsp/jobs/trigger_downloads.sh
index 32832ac..e42e4a1 100755
--- a/tsp/jobs/trigger_downloads.sh
+++ b/tsp/jobs/trigger_downloads.sh
@@ -25,7 +25,7 @@ cd "${WS_WATCHER}"
touch next_dwn
i=$(cat next_dwn)
test -n "$i" || i=1
-for url in $(cat new_urls)
+for url in $(cat modified_urls)
do
echo "Processing: ${url}"
if [ $url = "*latest*" ]
@@ -33,6 +33,8 @@ do
echo "Skipping symlink url"
continue
fi
+ # kill currently running jobs for this url
+ for i in $(tsmaster | awk -v URL="$url" '$2 ~ "running" && $0 ~ URL {print $1}'); do kill `tsmaster -p $i`; done
for target in "minnow" "odroid"
do
mkdir -p "${WS_DOWNLOAD}/$i"
diff --git a/tsp/jobs/watcher.sh b/tsp/jobs/watcher.sh
index 4c1e7ff..4502c2d 100755
--- a/tsp/jobs/watcher.sh
+++ b/tsp/jobs/watcher.sh
@@ -23,6 +23,8 @@ export TSP_DIR="$(CDPATH= cd -- "$(dirname -- "$0")" && pwd -P)/.."
mkdir -p "${WS_WATCHER}"
cd "${WS_WATCHER}"
touch dispatched_urls
+touch timestamp
+touch timestamp_snapshot
nr=$(tspoll -L PRERELEASE_WATCHER sh -c "ROOT=\"http://download.tizen.org/prerelease/tizen/common/\" \
${TSP_DIR}/scripts/crawler.py")
nr=$(tspoll -D $nr -L DOWNLOAD_TRIGGER sh -c "${TSP_DIR}/jobs/trigger_downloads.sh")
diff --git a/tsp/scripts/crawler.py b/tsp/scripts/crawler.py
index c61510a..59429e2 100755
--- a/tsp/scripts/crawler.py
+++ b/tsp/scripts/crawler.py
@@ -20,10 +20,11 @@
import os
import urllib2
+import time
import bs4
-new_urls = 'new_urls'
+discovered_urls = 'modified_urls'
dispatched_urls = 'dispatched_urls'
root = os.environ.get('ROOT', 'http://download.tizen.org/prerelease/tizen/common/')
@@ -57,26 +58,51 @@ def crawl(seeds):
return discovered
+def get_modified_paths(discovered, timestamp):
+ ret = set()
+ str_time = time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime(time.time()))
+ if os.path.exists(dispatched_urls):
+ with open(timestamp, 'r') as f:
+ stamp = f.read();
+ else:
+ return discovered
+ with open(timestamp, 'w') as f:
+ f.write(str_time)
+ for url in discovered:
+ for md5sums_url in [url + 'images/arm-wayland/common-wayland-3parts-armv7l-odroidu3/MD5SUMS',\
+ url + 'images/x86_64-wayland/common-wayland-efi-x86_64/MD5SUMS',\
+ url + 'images/ia32-wayland/common-wayland-efi-i586/MD5SUMS']:
+ try:
+ u = urllib2.urlopen(urllib2.Request(md5sums_url, headers={"If-Modified-Since": stamp}))
+ except urllib2.HTTPError as e:
+ pass
+ else:
+ ret.add(url)
+ break
+ return ret
+
if '__main__' == __name__:
snapshots = crawl(seeds)
+ timestamp_file = 'timestamp'
if "snapshots" in root:
- new = snapshots
+ timestamp_file = 'timestamp_snapshot'
+ discovered = snapshots
else:
- new = set()
+ discovered = set()
for snapshot in snapshots:
- new |= crawl((snapshot,))
+ discovered |= crawl((snapshot,))
if os.path.exists(dispatched_urls):
with open(dispatched_urls, 'r') as f:
dispatched = set([url.rstrip() for url in f.readlines()])
- # save new URLs for dispatching download requests
- new -= dispatched
- with open(new_urls, 'w') as f:
- f.write('\n'.join(new))
+ # save discovered URLs for dispatching download requests
+ modified = get_modified_paths(discovered, timestamp_file)
+ with open(discovered_urls, 'w') as f:
+ f.write('\n'.join(modified))
# save all URLs for storing download history
- dispatched |= new
+ dispatched |= modified
with open(dispatched_urls, 'w') as f:
f.write('\n'.join(dispatched))