# -------------------------------------------------------------------------- #
# Copyright 2006-2008, University of Chicago                                 #
# Copyright 2008, Distributed Systems Architecture Group, Universidad        #
# Complutense de Madrid (dsa-research.org)                                   #
#                                                                            #
# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
# not use this file except in compliance with the License. You may obtain    #
# a copy of the License at                                                   #
#                                                                            #
# http://www.apache.org/licenses/LICENSE-2.0                                 #
#                                                                            #
# Unless required by applicable law or agreed to in writing, software        #
# distributed under the License is distributed on an "AS IS" BASIS,          #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
# See the License for the specific language governing permissions and        #
# limitations under the License.                                             #
# -------------------------------------------------------------------------- #
import logging
import operator

import haizea.common.constants as constants
from haizea.common.utils import get_clock
from haizea.core.frontends import RequestFrontend
from haizea.core.leases import DiskImageSoftwareEnvironment, Lease, LeaseWorkload
class TracefileFrontend(RequestFrontend):
    """Lease request frontend that reads requests from a trace file.

    The trace may be an SWF file or an LWF/XML lease workload. Optional
    companion files (taken from the manager's configuration) allow
    injecting extra leases, assigning a disk image to each lease, adding
    runtime/boot overheads, and overriding the requested memory.
    """

    def __init__(self, manager, starttime):
        """Load the trace and all optional companion files.

        Arguments:
        manager -- the Haizea manager; its ``config`` drives every option
                   read below.
        starttime -- the absolute time the trace's relative times are
                     anchored to.
        """
        RequestFrontend.__init__(self, manager)
        self.logger = logging.getLogger("TFILE")
        config = manager.config

        tracefile = config.get("tracefile")
        injectfile = config.get("injectionfile")
        imagefile = config.get("imagefile")

        # Read the trace file. self.requests is a list of lease requests,
        # chosen by file extension (.swf vs .lwf/.xml).
        self.logger.info("Loading tracefile %s" % tracefile)
        self.requests = None
        if tracefile.endswith(".swf"):
            self.requests = LeaseWorkload.from_swf_file(tracefile, starttime)
        elif tracefile.endswith(".lwf") or tracefile.endswith(".xml"):
            lease_workload = LeaseWorkload.from_xml_file(tracefile, starttime)
            self.requests = lease_workload.get_leases()

        if injectfile != None:
            self._inject_leases(injectfile, starttime)

        if imagefile != None:
            self._assign_images(imagefile)

        # Add runtime overhead, if necessary.
        self._add_overheads(config)

        # Override requested memory, if necessary.
        memory = config.get("override-memory")
        if memory != constants.NO_MEMORY_OVERRIDE:
            for r in self.requests:
                r.requested_resources.set_by_type(constants.RES_MEM, memory)

        # Summarize the workload by lease type for the log message,
        # e.g. "10 best-effort + 2 AR".
        types = {}
        for r in self.requests:
            types[r.get_type()] = types.setdefault(r.get_type(), 0) + 1
        types_str = " + ".join(["%i %s" % (types[t], Lease.type_str[t]) for t in types])

        self.logger.info("Loaded workload with %i requests (%s)" % (len(self.requests), types_str))

    def _inject_leases(self, injectfile, starttime):
        """Merge leases from an injection file, keeping submit-time order."""
        self.logger.info("Loading injection file %s" % injectfile)
        inj_lease_workload = LeaseWorkload.from_xml_file(injectfile, starttime)
        inj_leases = inj_lease_workload.get_leases()
        self.requests += inj_leases
        self.requests.sort(key=operator.attrgetter("submit_time"))

    def _assign_images(self, imagefile):
        """Assign a disk image to each loaded lease from an image file.

        The file has two sections separated by a line starting with '#':
        first "image size" pairs, then one image id per line, assigned to
        the loaded leases in order.
        """
        self.logger.info("Loading image file %s" % imagefile)
        imagesizes = {}
        images = []
        state = 0  # 0 -> reading image sizes, 1 -> reading image sequence
        # BUG FIX: the original called open(imgfile, ...) -- an undefined
        # name (the variable is "imagefile") -- and never closed the handle.
        # Use the right variable and a context manager.
        with open(imagefile, "r") as imgf:
            for line in imgf:
                if line[0] == '#':
                    state = 1
                elif state == 0:
                    image, size = line.split()
                    imagesizes[image] = int(size)
                elif state == 1:
                    images.append(line.strip())
        for lease, image_id in zip(self.requests, images):
            lease.software = DiskImageSoftwareEnvironment(image_id, imagesizes[image_id])

    def _add_overheads(self, config):
        """Add runtime slowdown and boot/shutdown overheads, if configured."""
        add_overhead = config.get("add-overhead")
        if add_overhead == constants.RUNTIMEOVERHEAD_NONE:
            return
        slowdown_overhead = config.get("runtime-slowdown-overhead")
        boot_overhead = config.get("bootshutdown-overhead")
        for r in self.requests:
            # BUG FIX: the original tested isinstance(r, BestEffortLease),
            # but no such name is imported anywhere in this file; use the
            # lease-type accessor this class already relies on instead.
            # NOTE(review): assumes Lease.BEST_EFFORT identifies best-effort
            # leases -- confirm against haizea.core.leases.
            if add_overhead == constants.RUNTIMEOVERHEAD_ALL or \
               (add_overhead == constants.RUNTIMEOVERHEAD_BE and r.get_type() == Lease.BEST_EFFORT):
                if slowdown_overhead != 0:
                    r.add_runtime_overhead(slowdown_overhead)
                r.add_boot_overhead(boot_overhead)

    def get_accumulated_requests(self):
        """Return (and remove) every request submitted up to the current time.

        When reading from a trace file there are no "accumulated requests";
        we simply take whatever requests in the trace are due by the time
        reported by the clock.
        """
        time = get_clock().get_time()
        nowreq = [r for r in self.requests if r.submit_time <= time]
        self.requests = [r for r in self.requests if r.submit_time > time]
        return nowreq

    def exists_more_requests(self):
        """Return True if some requests have not been handed out yet."""
        return len(self.requests) != 0

    def get_next_request_time(self):
        """Return the submit time of the next pending request, or None.

        Relies on self.requests being kept in submit-time order.
        """
        if self.exists_more_requests():
            return self.requests[0].submit_time
        else:
            return None