1# -*- coding: utf-8 -*- 2# (c) 2009-2020 Martin Wendt and contributors; see WsgiDAV https://github.com/mar10/wsgidav 3# Original PyFileServer (c) 2005 Ho Chun Wei. 4# Licensed under the MIT license: 5# http://www.opensource.org/licenses/mit-license.php 6""" 7WSGI application that handles one single WebDAV request. 8""" 9from wsgidav import compat, util, xml_tools 10from wsgidav.dav_error import ( 11 as_DAVError, 12 DAVError, 13 get_http_status_string, 14 HTTP_BAD_GATEWAY, 15 HTTP_BAD_REQUEST, 16 HTTP_CONFLICT, 17 HTTP_CREATED, 18 HTTP_FAILED_DEPENDENCY, 19 HTTP_FORBIDDEN, 20 HTTP_INTERNAL_ERROR, 21 HTTP_LENGTH_REQUIRED, 22 HTTP_MEDIATYPE_NOT_SUPPORTED, 23 HTTP_METHOD_NOT_ALLOWED, 24 HTTP_NO_CONTENT, 25 HTTP_NOT_FOUND, 26 HTTP_NOT_IMPLEMENTED, 27 HTTP_OK, 28 HTTP_PRECONDITION_FAILED, 29 HTTP_RANGE_NOT_SATISFIABLE, 30 PRECONDITION_CODE_LockTokenMismatch, 31 PRECONDITION_CODE_PropfindFiniteDepth, 32) 33from wsgidav.util import etree 34 35 36__docformat__ = "reStructuredText" 37 38_logger = util.get_module_logger(__name__) 39 40DEFAULT_BLOCK_SIZE = 8192 41 42 43# ======================================================================== 44# RequestServer 45# ======================================================================== 46class RequestServer(object): 47 def __init__(self, dav_provider): 48 self._davProvider = dav_provider 49 self.allow_propfind_infinite = True 50 self._verbose = 3 51 self.block_size = DEFAULT_BLOCK_SIZE 52 # _logger.debug("RequestServer: __init__") 53 54 self._possible_methods = ["OPTIONS", "HEAD", "GET", "PROPFIND"] 55 # if self._davProvider.prop_manager is not None: 56 # self._possible_methods.extend( [ "PROPFIND" ] ) 57 if not self._davProvider.is_readonly(): 58 self._possible_methods.extend( 59 ["PUT", "DELETE", "COPY", "MOVE", "MKCOL", "PROPPATCH", "POST"] 60 ) 61 # if self._davProvider.prop_manager is not None: 62 # self._possible_methods.extend( [ "PROPPATCH" ] ) 63 if self._davProvider.lock_manager is not None: 64 self._possible_methods.extend(["LOCK", "UNLOCK"]) 65 66 def __del__(self): 67 # _logger.debug("RequestServer: __del__") 68 pass 69 70 def __call__(self, environ, start_response): 71 assert "wsgidav.verbose" in environ 72 provider = self._davProvider 73 # TODO: allow anonymous somehow: this should run, even if http_authenticator middleware 74 # is not installed 75 # assert "wsgidav.auth.user_name" in environ 76 if "wsgidav.auth.user_name" not in environ: 77 _logger.warning("Missing 'wsgidav.auth.user_name' in environ") 78 79 environ["wsgidav.user_name"] = environ.get( 80 "wsgidav.auth.user_name", "anonymous" 81 ) 82 requestmethod = environ["REQUEST_METHOD"] 83 84 self.block_size = environ["wsgidav.config"].get( 85 "block_size", DEFAULT_BLOCK_SIZE 86 ) 87 88 # Convert 'infinity' and 'T'/'F' to a common case 89 if environ.get("HTTP_DEPTH") is not None: 90 environ["HTTP_DEPTH"] = environ["HTTP_DEPTH"].lower() 91 if environ.get("HTTP_OVERWRITE") is not None: 92 environ["HTTP_OVERWRITE"] = environ["HTTP_OVERWRITE"].upper() 93 94 if "HTTP_EXPECT" in environ: 95 pass 96 97 # Dispatch HTTP request methods to 'do_METHOD()' handlers 98 method = None 99 if requestmethod in self._possible_methods: 100 method_name = "do_{}".format(requestmethod) 101 method = getattr(self, method_name, None) 102 if not method: 103 _logger.error("Invalid HTTP method {!r}".format(requestmethod)) 104 self._fail(HTTP_METHOD_NOT_ALLOWED) 105 106 if environ.get("wsgidav.debug_break"): 107 pass # Set a break point here 108 109 if environ.get("wsgidav.debug_profile"): 110 from cProfile import Profile 111 112 profile = Profile() 113 res = profile.runcall( 114 provider.custom_request_handler, environ, start_response, method 115 ) 116 # sort: 0:"calls",1:"time", 2: "cumulative" 117 profile.print_stats(sort=2) 118 for v in res: 119 yield v 120 if hasattr(res, "close"): 121 res.close() 122 return 123 124 # Run requesthandler (provider may override, #55) 125 # _logger.warning("#1...") 126 app_iter = provider.custom_request_handler(environ, start_response, method) 127 # _logger.warning("#1... 2") 128 try: 129 # _logger.warning("#1... 3") 130 for v in app_iter: 131 # _logger.warning("#1... 4") 132 yield v 133 # _logger.warning("#1... 5") 134 # except Exception: 135 # _logger.warning("#1... 6") 136 # _logger.exception("") 137 # status = "500 Oops" 138 # response_headers = [("content-type", "text/plain")] 139 # start_response(status, response_headers, sys.exc_info()) 140 # return ["error body goes here"] 141 finally: 142 # _logger.warning("#1... 7") 143 if hasattr(app_iter, "close"): 144 # _logger.warning("#1... 8") 145 app_iter.close() 146 return 147 148 def _fail(self, value, context_info=None, src_exception=None, err_condition=None): 149 """Wrapper to raise (and log) DAVError.""" 150 util.fail(value, context_info, src_exception, err_condition) 151 152 def _send_response( 153 self, environ, start_response, root_res, success_code, error_list 154 ): 155 """Send WSGI response (single or multistatus). 156 157 - If error_list is None or [], then <success_code> is send as response. 158 - If error_list contains a single error with a URL that matches root_res, 159 then this error is returned. 160 - If error_list contains more than one error, then '207 Multi-Status' is 161 returned. 162 """ 163 assert success_code in (HTTP_CREATED, HTTP_NO_CONTENT, HTTP_OK) 164 if not error_list: 165 # Status OK 166 return util.send_status_response(environ, start_response, success_code) 167 if len(error_list) == 1 and error_list[0][0] == root_res.get_href(): 168 # Only one error that occurred on the root resource 169 return util.send_status_response(environ, start_response, error_list[0][1]) 170 171 # Multiple errors, or error on one single child 172 multistatusEL = xml_tools.make_multistatus_el() 173 174 for refurl, e in error_list: 175 # assert refurl.startswith("http:") 176 assert refurl.startswith("/") 177 assert isinstance(e, DAVError) 178 responseEL = etree.SubElement(multistatusEL, "{DAV:}response") 179 etree.SubElement(responseEL, "{DAV:}href").text = refurl 180 etree.SubElement(responseEL, "{DAV:}status").text = "HTTP/1.1 {}".format( 181 get_http_status_string(e) 182 ) 183 184 return util.send_multi_status_response(environ, start_response, multistatusEL) 185 186 def _check_write_permission(self, res, depth, environ): 187 """Raise DAVError(HTTP_LOCKED), if res is locked. 188 189 If depth=='infinity', we also raise when child resources are locked. 190 """ 191 lock_man = self._davProvider.lock_manager 192 if lock_man is None or res is None: 193 return True 194 195 ref_url = res.get_ref_url() 196 197 if "wsgidav.conditions.if" not in environ: 198 util.parse_if_header_dict(environ) 199 200 # raise HTTP_LOCKED if conflict exists 201 lock_man.check_write_permission( 202 ref_url, 203 depth, 204 environ["wsgidav.ifLockTokenList"], 205 environ["wsgidav.user_name"], 206 ) 207 208 def _evaluate_if_headers(self, res, environ): 209 """Apply HTTP headers on <path>, raising DAVError if conditions fail. 210 211 Add environ['wsgidav.conditions.if'] and environ['wsgidav.ifLockTokenList']. 212 Handle these headers: 213 214 - If-Match, If-Modified-Since, If-None-Match, If-Unmodified-Since: 215 Raising HTTP_PRECONDITION_FAILED or HTTP_NOT_MODIFIED 216 - If: 217 Raising HTTP_PRECONDITION_FAILED 218 219 @see http://www.webdav.org/specs/rfc4918.html#HEADER_If 220 @see util.evaluate_http_conditionals 221 """ 222 # Add parsed If header to environ 223 if "wsgidav.conditions.if" not in environ: 224 util.parse_if_header_dict(environ) 225 226 # Bail out, if res does not exist 227 if res is None: 228 return 229 230 ifDict = environ["wsgidav.conditions.if"] 231 232 # Raise HTTP_PRECONDITION_FAILED or HTTP_NOT_MODIFIED, if standard 233 # HTTP condition fails 234 last_modified = -1 # nonvalid modified time 235 entitytag = "[]" # Non-valid entity tag 236 if res.get_last_modified() is not None: 237 last_modified = int(res.get_last_modified()) 238 if res.get_etag() is not None: 239 entitytag = res.get_etag() 240 241 if ( 242 "HTTP_IF_MODIFIED_SINCE" in environ 243 or "HTTP_IF_UNMODIFIED_SINCE" in environ 244 or "HTTP_IF_MATCH" in environ 245 or "HTTP_IF_NONE_MATCH" in environ 246 ): 247 util.evaluate_http_conditionals(res, last_modified, entitytag, environ) 248 249 if "HTTP_IF" not in environ: 250 return 251 252 # Raise HTTP_PRECONDITION_FAILED, if DAV 'If' condition fails 253 # TODO: handle empty locked resources 254 # TODO: handle unmapped locked resources 255 # isnewfile = not provider.exists(mappedpath) 256 257 ref_url = res.get_ref_url() 258 lock_man = self._davProvider.lock_manager 259 locktokenlist = [] 260 if lock_man: 261 lockList = lock_man.get_indirect_url_lock_list( 262 ref_url, environ["wsgidav.user_name"] 263 ) 264 for lock in lockList: 265 locktokenlist.append(lock["token"]) 266 267 if not util.test_if_header_dict(res, ifDict, ref_url, locktokenlist, entitytag): 268 self._fail(HTTP_PRECONDITION_FAILED, "'If' header condition failed.") 269 270 return 271 272 def do_PROPFIND(self, environ, start_response): 273 """ 274 TODO: does not yet support If and If HTTP Conditions 275 @see http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND 276 """ 277 path = environ["PATH_INFO"] 278 res = self._davProvider.get_resource_inst(path, environ) 279 280 # RFC: By default, the PROPFIND method without a Depth header MUST act 281 # as if a "Depth: infinity" header was included. 282 environ.setdefault("HTTP_DEPTH", "infinity") 283 if not environ["HTTP_DEPTH"] in ("0", "1", "infinity"): 284 self._fail( 285 HTTP_BAD_REQUEST, 286 "Invalid Depth header: '{}'.".format(environ["HTTP_DEPTH"]), 287 ) 288 289 if environ["HTTP_DEPTH"] == "infinity" and not self.allow_propfind_infinite: 290 self._fail( 291 HTTP_FORBIDDEN, 292 "PROPFIND 'infinite' was disabled for security reasons.", 293 err_condition=PRECONDITION_CODE_PropfindFiniteDepth, 294 ) 295 296 if res is None: 297 self._fail(HTTP_NOT_FOUND, path) 298 299 if environ.get("wsgidav.debug_break"): 300 pass # break point 301 302 self._evaluate_if_headers(res, environ) 303 304 # Parse PROPFIND request 305 requestEL = util.parse_xml_body(environ, allow_empty=True) 306 if requestEL is None: 307 # An empty PROPFIND request body MUST be treated as a request for 308 # the names and values of all properties. 309 requestEL = etree.XML( 310 "<D:propfind xmlns:D='DAV:'><D:allprop/></D:propfind>" 311 ) 312 313 if requestEL.tag != "{DAV:}propfind": 314 self._fail(HTTP_BAD_REQUEST) 315 316 propNameList = [] 317 propFindMode = None 318 for pfnode in requestEL: 319 if pfnode.tag == "{DAV:}allprop": 320 if propFindMode: 321 # RFC: allprop and name are mutually exclusive 322 self._fail(HTTP_BAD_REQUEST) 323 propFindMode = "allprop" 324 # TODO: implement <include> option 325 # elif pfnode.tag == "{DAV:}include": 326 # if not propFindMode in (None, "allprop"): 327 # self._fail(HTTP_BAD_REQUEST, 328 # "<include> element is only valid with 'allprop'.") 329 # for pfpnode in pfnode: 330 # propNameList.append(pfpnode.tag) 331 elif pfnode.tag == "{DAV:}name": 332 if propFindMode: # RFC: allprop and name are mutually exclusive 333 self._fail(HTTP_BAD_REQUEST) 334 propFindMode = "name" 335 elif pfnode.tag == "{DAV:}prop": 336 # RFC: allprop and name are mutually exclusive 337 if propFindMode not in (None, "named"): 338 self._fail(HTTP_BAD_REQUEST) 339 propFindMode = "named" 340 for pfpnode in pfnode: 341 propNameList.append(pfpnode.tag) 342 343 # --- Build list of resource URIs 344 345 reslist = res.get_descendants(depth=environ["HTTP_DEPTH"], add_self=True) 346 # if environ["wsgidav.verbose"] >= 3: 347 # pprint(reslist, indent=4) 348 349 multistatusEL = xml_tools.make_multistatus_el() 350 responsedescription = [] 351 352 for child in reslist: 353 354 if propFindMode == "allprop": 355 propList = child.get_properties("allprop") 356 elif propFindMode == "name": 357 propList = child.get_properties("name") 358 else: 359 propList = child.get_properties("named", name_list=propNameList) 360 361 href = child.get_href() 362 util.add_property_response(multistatusEL, href, propList) 363 364 if responsedescription: 365 etree.SubElement( 366 multistatusEL, "{DAV:}responsedescription" 367 ).text = "\n".join(responsedescription) 368 369 return util.send_multi_status_response(environ, start_response, multistatusEL) 370 371 def do_PROPPATCH(self, environ, start_response): 372 """Handle PROPPATCH request to set or remove a property. 373 374 @see http://www.webdav.org/specs/rfc4918.html#METHOD_PROPPATCH 375 """ 376 path = environ["PATH_INFO"] 377 res = self._davProvider.get_resource_inst(path, environ) 378 379 # Only accept Depth: 0 (but assume this, if omitted) 380 environ.setdefault("HTTP_DEPTH", "0") 381 if environ["HTTP_DEPTH"] != "0": 382 self._fail(HTTP_BAD_REQUEST, "Depth must be '0'.") 383 384 if res is None: 385 self._fail(HTTP_NOT_FOUND, path) 386 387 self._evaluate_if_headers(res, environ) 388 self._check_write_permission(res, "0", environ) 389 390 # Parse request 391 requestEL = util.parse_xml_body(environ) 392 393 if requestEL.tag != "{DAV:}propertyupdate": 394 self._fail(HTTP_BAD_REQUEST) 395 396 # Create a list of update request tuples: (name, value) 397 propupdatelist = [] 398 399 for ppnode in requestEL: 400 propupdatemethod = None 401 if ppnode.tag == "{DAV:}remove": 402 propupdatemethod = "remove" 403 elif ppnode.tag == "{DAV:}set": 404 propupdatemethod = "set" 405 else: 406 self._fail( 407 HTTP_BAD_REQUEST, "Unknown tag (expected 'set' or 'remove')." 408 ) 409 410 for propnode in ppnode: 411 if propnode.tag != "{DAV:}prop": 412 self._fail(HTTP_BAD_REQUEST, "Unknown tag (expected 'prop').") 413 414 for propertynode in propnode: 415 propvalue = None 416 if propupdatemethod == "remove": 417 propvalue = None # Mark as 'remove' 418 if len(propertynode) > 0: 419 # 14.23: All the XML elements in a 'prop' XML 420 # element inside of a 'remove' XML element MUST be 421 # empty 422 self._fail( 423 HTTP_BAD_REQUEST, 424 "prop element must be empty for 'remove'.", 425 ) 426 else: 427 propvalue = propertynode 428 429 propupdatelist.append((propertynode.tag, propvalue)) 430 431 # Apply updates in SIMULATION MODE and create a result list (name, 432 # result) 433 successflag = True 434 writeresultlist = [] 435 436 for (name, propvalue) in propupdatelist: 437 try: 438 res.set_property_value(name, propvalue, dry_run=True) 439 except Exception as e: 440 writeresult = as_DAVError(e) 441 else: 442 writeresult = "200 OK" 443 writeresultlist.append((name, writeresult)) 444 successflag = successflag and writeresult == "200 OK" 445 446 # Generate response list of 2-tuples (name, value) 447 # <value> is None on success, or an instance of DAVError 448 propResponseList = [] 449 responsedescription = [] 450 451 if not successflag: 452 # If dry run failed: convert all OK to FAILED_DEPENDENCY. 453 for (name, result) in writeresultlist: 454 if result == "200 OK": 455 result = DAVError(HTTP_FAILED_DEPENDENCY) 456 elif isinstance(result, DAVError): 457 responsedescription.append(result.get_user_info()) 458 propResponseList.append((name, result)) 459 460 else: 461 # Dry-run succeeded: set properties again, this time in 'real' mode 462 # In theory, there should be no exceptions thrown here, but this is 463 # real live... 464 for (name, propvalue) in propupdatelist: 465 try: 466 res.set_property_value(name, propvalue, dry_run=False) 467 # Set value to None, so the response xml contains empty tags 468 propResponseList.append((name, None)) 469 except Exception as e: 470 e = as_DAVError(e) 471 propResponseList.append((name, e)) 472 responsedescription.append(e.get_user_info()) 473 474 # Generate response XML 475 multistatusEL = xml_tools.make_multistatus_el() 476 href = res.get_href() 477 util.add_property_response(multistatusEL, href, propResponseList) 478 if responsedescription: 479 etree.SubElement( 480 multistatusEL, "{DAV:}responsedescription" 481 ).text = "\n".join(responsedescription) 482 483 # Send response 484 return util.send_multi_status_response(environ, start_response, multistatusEL) 485 486 def do_MKCOL(self, environ, start_response): 487 """Handle MKCOL request to create a new collection. 488 489 @see http://www.webdav.org/specs/rfc4918.html#METHOD_MKCOL 490 """ 491 path = environ["PATH_INFO"] 492 provider = self._davProvider 493 # res = provider.get_resource_inst(path, environ) 494 495 # Do not understand ANY request body entities 496 if util.get_content_length(environ) != 0: 497 self._fail( 498 HTTP_MEDIATYPE_NOT_SUPPORTED, 499 "The server does not handle any body content.", 500 ) 501 502 # Only accept Depth: 0 (but assume this, if omitted) 503 if environ.setdefault("HTTP_DEPTH", "0") != "0": 504 self._fail(HTTP_BAD_REQUEST, "Depth must be '0'.") 505 506 if provider.exists(path, environ): 507 self._fail( 508 HTTP_METHOD_NOT_ALLOWED, 509 "MKCOL can only be executed on an unmapped URL.", 510 ) 511 512 parentRes = provider.get_resource_inst(util.get_uri_parent(path), environ) 513 if not parentRes or not parentRes.is_collection: 514 self._fail(HTTP_CONFLICT, "Parent must be an existing collection.") 515 516 # TODO: should we check If headers here? 517 # self._evaluate_if_headers(res, environ) 518 # Check for write permissions on the PARENT 519 self._check_write_permission(parentRes, "0", environ) 520 521 parentRes.create_collection(util.get_uri_name(path)) 522 523 return util.send_status_response(environ, start_response, HTTP_CREATED) 524 525 def do_POST(self, environ, start_response): 526 """ 527 @see http://www.webdav.org/specs/rfc4918.html#METHOD_POST 528 @see http://stackoverflow.com/a/22606899/19166 529 """ 530 self._fail(HTTP_METHOD_NOT_ALLOWED) 531 532 def do_DELETE(self, environ, start_response): 533 """ 534 @see: http://www.webdav.org/specs/rfc4918.html#METHOD_DELETE 535 """ 536 path = environ["PATH_INFO"] 537 provider = self._davProvider 538 res = provider.get_resource_inst(path, environ) 539 540 # --- Check request preconditions ------------------------------------- 541 542 if util.get_content_length(environ) != 0: 543 self._fail( 544 HTTP_MEDIATYPE_NOT_SUPPORTED, 545 "The server does not handle any body content.", 546 ) 547 if res is None: 548 self._fail(HTTP_NOT_FOUND, path) 549 550 if res.is_collection: 551 # Delete over collection 552 # "The DELETE method on a collection MUST act as if a 553 # 'Depth: infinity' header was used on it. A client MUST NOT submit 554 # a Depth header with a DELETE on a collection with any value but 555 # infinity." 556 if environ.setdefault("HTTP_DEPTH", "infinity") != "infinity": 557 self._fail( 558 HTTP_BAD_REQUEST, 559 "Only Depth: infinity is supported for collections.", 560 ) 561 else: 562 if not environ.setdefault("HTTP_DEPTH", "0") in ("0", "infinity"): 563 self._fail( 564 HTTP_BAD_REQUEST, 565 "Only Depth: 0 or infinity are supported for non-collections.", 566 ) 567 568 self._evaluate_if_headers(res, environ) 569 # We need write access on the parent collection. Also we check for 570 # locked children 571 parentRes = provider.get_resource_inst(util.get_uri_parent(path), environ) 572 if parentRes: 573 # self._check_write_permission(parentRes, environ["HTTP_DEPTH"], environ) 574 self._check_write_permission(parentRes, "0", environ) 575 else: 576 # self._check_write_permission(res, environ["HTTP_DEPTH"], environ) 577 self._check_write_permission(res, "0", environ) 578 579 # --- Let provider handle the request natively ------------------------ 580 581 # Errors in deletion; [ (<ref-url>, <DAVError>), ... ] 582 error_list = [] 583 584 try: 585 handled = res.handle_delete() 586 assert handled in (True, False) or type(handled) is list 587 if type(handled) is list: 588 error_list = handled 589 handled = True 590 except Exception as e: 591 error_list = [(res.get_href(), as_DAVError(e))] 592 handled = True 593 if handled: 594 return self._send_response( 595 environ, start_response, res, HTTP_NO_CONTENT, error_list 596 ) 597 598 # --- Let provider implement own recursion ---------------------------- 599 600 # Get a list of all resources (parents after children, so we can remove 601 # them in that order) 602 reverseChildList = res.get_descendants( 603 depth_first=True, depth=environ["HTTP_DEPTH"], add_self=True 604 ) 605 606 if res.is_collection and res.support_recursive_delete(): 607 has_conflicts = False 608 for childRes in reverseChildList: 609 try: 610 self._evaluate_if_headers(childRes, environ) 611 self._check_write_permission(childRes, "0", environ) 612 except Exception: 613 has_conflicts = True 614 break 615 616 if not has_conflicts: 617 try: 618 error_list = res.delete() 619 except Exception as e: 620 error_list = [(res.get_href(), as_DAVError(e))] 621 return self._send_response( 622 environ, start_response, res, HTTP_NO_CONTENT, error_list 623 ) 624 625 # --- Implement file-by-file processing ------------------------------- 626 627 # Hidden paths (ancestors of failed deletes) {<path>: True, ...} 628 ignore_dict = {} 629 for childRes in reverseChildList: 630 if childRes.path in ignore_dict: 631 _logger.debug( 632 "Skipping {} (contains error child)".format(childRes.path) 633 ) 634 ignore_dict[util.get_uri_parent(childRes.path)] = "" 635 continue 636 637 try: 638 # 9.6.1.: Any headers included with delete must be applied in 639 # processing every resource to be deleted 640 self._evaluate_if_headers(childRes, environ) 641 self._check_write_permission(childRes, "0", environ) 642 childRes.delete() 643 # Double-check, if deletion succeeded 644 if provider.exists(childRes.path, environ): 645 raise DAVError( 646 HTTP_INTERNAL_ERROR, "Resource could not be deleted." 647 ) 648 except Exception as e: 649 error_list.append((childRes.get_href(), as_DAVError(e))) 650 ignore_dict[util.get_uri_parent(childRes.path)] = True 651 652 # --- Send response --------------------------------------------------- 653 654 return self._send_response( 655 environ, start_response, res, HTTP_NO_CONTENT, error_list 656 ) 657 658 def _stream_data_chunked(self, environ, block_size): 659 """Get the data from a chunked transfer.""" 660 # Chunked Transfer Coding 661 # http://www.servlets.com/rfcs/rfc2616-sec3.html#sec3.6.1 662 663 if "Darwin" in environ.get("HTTP_USER_AGENT", "") and environ.get( 664 "HTTP_X_EXPECTED_ENTITY_LENGTH" 665 ): 666 # Mac Finder, that does not prepend chunk-size + CRLF , 667 # like it should to comply with the spec. It sends chunk 668 # size as integer in a HTTP header instead. 669 WORKAROUND_CHUNK_LENGTH = True 670 buf = environ.get("HTTP_X_EXPECTED_ENTITY_LENGTH", "0") 671 length = int(buf) 672 else: 673 WORKAROUND_CHUNK_LENGTH = False 674 buf = environ["wsgi.input"].readline() 675 environ["wsgidav.some_input_read"] = 1 676 if buf == compat.b_empty: 677 length = 0 678 else: 679 length = int(buf, 16) 680 681 while length > 0: 682 buf = environ["wsgi.input"].read(block_size) 683 yield buf 684 if WORKAROUND_CHUNK_LENGTH: 685 environ["wsgidav.some_input_read"] = 1 686 # Keep receiving until we read expected size or reach 687 # EOF 688 if buf == compat.b_empty: 689 length = 0 690 else: 691 length -= len(buf) 692 else: 693 environ["wsgi.input"].readline() 694 buf = environ["wsgi.input"].readline() 695 if buf == compat.b_empty: 696 length = 0 697 else: 698 length = int(buf, 16) 699 environ["wsgidav.all_input_read"] = 1 700 701 def _stream_data(self, environ, content_length, block_size): 702 """Get the data from a non-chunked transfer.""" 703 if content_length == 0: 704 # TODO: review this 705 # Windows MiniRedir submit PUT with Content-Length 0, 706 # before LOCK and the real PUT. So we have to accept this. 707 _logger.debug("PUT: Content-Length == 0. Creating empty file...") 708 709 # elif content_length < 0: 710 # # TODO: review this 711 # # If CONTENT_LENGTH is invalid, we may try to workaround this 712 # # by reading until the end of the stream. This may block however! 713 # # The iterator produced small chunks of varying size, but not 714 # # sure, if we always get everything before it times out. 715 # _logger.warning("PUT with invalid Content-Length (%s). " 716 # "Trying to read all (this may timeout)..." 717 # .format(environ.get("CONTENT_LENGTH"))) 718 # nb = 0 719 # try: 720 # for s in environ["wsgi.input"]: 721 # environ["wsgidav.some_input_read"] = 1 722 # _logger.debug("PUT: read from wsgi.input.__iter__, len=%s" % len(s)) 723 # yield s 724 # nb += len (s) 725 # except socket.timeout: 726 # _logger.warning("PUT: input timed out after writing %s bytes" % nb) 727 # hasErrors = True 728 else: 729 assert content_length > 0 730 contentremain = content_length 731 while contentremain > 0: 732 n = min(contentremain, block_size) 733 readbuffer = environ["wsgi.input"].read(n) 734 # This happens with litmus expect-100 test: 735 if not len(readbuffer) > 0: 736 _logger.error("input.read({}) returned 0 bytes".format(n)) 737 break 738 environ["wsgidav.some_input_read"] = 1 739 yield readbuffer 740 contentremain -= len(readbuffer) 741 742 if contentremain == 0: 743 environ["wsgidav.all_input_read"] = 1 744 745 def do_PUT(self, environ, start_response): 746 """ 747 @see: http://www.webdav.org/specs/rfc4918.html#METHOD_PUT 748 """ 749 path = environ["PATH_INFO"] 750 provider = self._davProvider 751 res = provider.get_resource_inst(path, environ) 752 parentRes = provider.get_resource_inst(util.get_uri_parent(path), environ) 753 754 isnewfile = res is None 755 756 # Test for unsupported stuff 757 if "HTTP_CONTENT_ENCODING" in environ: 758 util.fail(HTTP_NOT_IMPLEMENTED, "Content-encoding header is not supported.") 759 760 # An origin server that allows PUT on a given target resource MUST send 761 # a 400 (Bad Request) response to a PUT request that contains a 762 # Content-Range header field 763 # (http://tools.ietf.org/html/rfc7231#section-4.3.4) 764 if "HTTP_CONTENT_RANGE" in environ: 765 util.fail( 766 HTTP_BAD_REQUEST, "Content-range header is not allowed on PUT requests." 767 ) 768 769 if res and res.is_collection: 770 self._fail(HTTP_METHOD_NOT_ALLOWED, "Cannot PUT to a collection") 771 elif ( 772 parentRes is None or not parentRes.is_collection 773 ): # TODO: allow parentRes==None? 774 self._fail(HTTP_CONFLICT, "PUT parent must be a collection") 775 776 self._evaluate_if_headers(res, environ) 777 778 if isnewfile: 779 self._check_write_permission(parentRes, "0", environ) 780 res = parentRes.create_empty_resource(util.get_uri_name(path)) 781 else: 782 self._check_write_permission(res, "0", environ) 783 784 # Start Content Processing 785 # Content-Length may be 0 or greater. (Set to -1 if missing or invalid.) 786 # WORKAROUND_BAD_LENGTH = True 787 try: 788 content_length = max(-1, int(environ.get("CONTENT_LENGTH", -1))) 789 except ValueError: 790 content_length = -1 791 792 # if content_length < 0 and not WORKAROUND_BAD_LENGTH: 793 if (content_length < 0) and ( 794 environ.get("HTTP_TRANSFER_ENCODING", "").lower() != "chunked" 795 ): 796 # HOTFIX: not fully understood, but MS sends PUT without content-length, 797 # when creating new files 798 agent = environ.get("HTTP_USER_AGENT", "") 799 if "Microsoft-WebDAV-MiniRedir" in agent or "gvfs/" in agent: # issue #10 800 _logger.warning( 801 "Setting misssing Content-Length to 0 for MS / gvfs client" 802 ) 803 content_length = 0 804 else: 805 util.fail( 806 HTTP_LENGTH_REQUIRED, 807 "PUT request with invalid Content-Length: ({})".format( 808 environ.get("CONTENT_LENGTH") 809 ), 810 ) 811 812 hasErrors = False 813 try: 814 if environ.get("HTTP_TRANSFER_ENCODING", "").lower() == "chunked": 815 data_stream = self._stream_data_chunked(environ, self.block_size) 816 else: 817 data_stream = self._stream_data( 818 environ, content_length, self.block_size 819 ) 820 821 fileobj = res.begin_write(content_type=environ.get("CONTENT_TYPE")) 822 823 # Process the data in the body. 824 825 # If the fileobj has a writelines() method, give it the data stream. 826 # If it doesn't, itearate the stream and call write() for each 827 # iteration. This gives providers more flexibility in how they 828 # consume the data. 829 if getattr(fileobj, "writelines", None): 830 fileobj.writelines(data_stream) 831 else: 832 for data in data_stream: 833 fileobj.write(data) 834 835 fileobj.close() 836 837 except Exception as e: 838 res.end_write(with_errors=True) 839 _logger.exception("PUT: byte copy failed") 840 util.fail(e) 841 842 res.end_write(hasErrors) 843 844 headers = None 845 if res.support_etag(): 846 entitytag = res.get_etag() 847 if entitytag is not None: 848 headers = [("ETag", '"{}"'.format(entitytag))] 849 850 if isnewfile: 851 return util.send_status_response( 852 environ, start_response, HTTP_CREATED, add_headers=headers 853 ) 854 return util.send_status_response( 855 environ, start_response, HTTP_NO_CONTENT, add_headers=headers 856 ) 857 858 def do_COPY(self, environ, start_response): 859 return self._copy_or_move(environ, start_response, False) 860 861 def do_MOVE(self, environ, start_response): 862 return self._copy_or_move(environ, start_response, True) 863 864 def _copy_or_move(self, environ, start_response, is_move): 865 """ 866 @see: http://www.webdav.org/specs/rfc4918.html#METHOD_COPY 867 @see: http://www.webdav.org/specs/rfc4918.html#METHOD_MOVE 868 """ 869 src_path = environ["PATH_INFO"] 870 provider = self._davProvider 871 src_res = provider.get_resource_inst(src_path, environ) 872 src_parent_res = provider.get_resource_inst( 873 util.get_uri_parent(src_path), environ 874 ) 875 876 def _debug_exception(e): 877 """Log internal exceptions with stacktrace that otherwise would be hidden.""" 878 if self._verbose >= 5: 879 _logger.exception("_debug_exception") 880 return 881 882 # --- Check source ---------------------------------------------------- 883 884 if src_res is None: 885 self._fail(HTTP_NOT_FOUND, src_path) 886 if "HTTP_DESTINATION" not in environ: 887 self._fail(HTTP_BAD_REQUEST, "Missing required Destination header.") 888 if not environ.setdefault("HTTP_OVERWRITE", "T") in ("T", "F"): 889 # Overwrite defaults to 'T' 890 self._fail(HTTP_BAD_REQUEST, "Invalid Overwrite header.") 891 if util.get_content_length(environ) != 0: 892 # RFC 2518 defined support for <propertybehavior>. 893 # This was dropped with RFC 4918. 894 # Still clients may send it (e.g. DAVExplorer 0.9.1 File-Copy) sends 895 # <A:propertybehavior xmlns:A="DAV:"> <A:keepalive>*</A:keepalive> 896 body = environ["wsgi.input"].read(util.get_content_length(environ)) 897 environ["wsgidav.all_input_read"] = 1 898 _logger.info("Ignored copy/move body: '{}'...".format(body[:50])) 899 900 if src_res.is_collection: 901 # The COPY method on a collection without a Depth header MUST act as 902 # if a Depth header with value "infinity" was included. 903 # A client may submit a Depth header on a COPY on a collection with 904 # a value of "0" or "infinity". 905 environ.setdefault("HTTP_DEPTH", "infinity") 906 if not environ["HTTP_DEPTH"] in ("0", "infinity"): 907 self._fail(HTTP_BAD_REQUEST, "Invalid Depth header.") 908 if is_move and environ["HTTP_DEPTH"] != "infinity": 909 self._fail( 910 HTTP_BAD_REQUEST, 911 "Depth header for MOVE collection must be 'infinity'.", 912 ) 913 else: 914 # It's an existing non-collection: assume Depth 0 915 # Note: litmus 'copymove: 3 (copy_simple)' sends 'infinity' for a 916 # non-collection resource, so we accept that too 917 environ.setdefault("HTTP_DEPTH", "0") 918 if not environ["HTTP_DEPTH"] in ("0", "infinity"): 919 self._fail(HTTP_BAD_REQUEST, "Invalid Depth header.") 920 environ["HTTP_DEPTH"] = "0" 921 922 # --- Get destination path and check for cross-realm access ----------- 923 924 # Destination header may be quoted (e.g. DAV Explorer sends unquoted, 925 # Windows quoted) 926 http_destination = compat.unquote(environ["HTTP_DESTINATION"]) 927 928 # Return fragments as part of <path> 929 # Fixes litmus -> running `basic': 9. delete_fragment....... WARNING: 930 # DELETE removed collection resource withRequest-URI including 931 # fragment; unsafe 932 ( 933 dest_scheme, 934 dest_netloc, 935 dest_path, 936 _dest_params, 937 _dest_query, 938 _dest_frag, 939 ) = compat.urlparse(http_destination, allow_fragments=False) 940 941 if src_res.is_collection: 942 dest_path = dest_path.rstrip("/") + "/" 943 944 if dest_scheme and dest_scheme.lower() != environ["wsgi.url_scheme"].lower(): 945 self._fail( 946 HTTP_BAD_GATEWAY, 947 "Source and destination must have the same scheme.\n" 948 "If you are running behind a reverse proxy, you may have to " 949 "rewrite the 'Destination' haeader.\n" 950 "(See https://github.com/mar10/wsgidav/issues/183)", 951 ) 952 elif dest_netloc and dest_netloc.lower() != environ["HTTP_HOST"].lower(): 953 # TODO: this should consider environ["SERVER_PORT"] also 954 self._fail( 955 HTTP_BAD_GATEWAY, "Source and destination must have the same host name." 956 ) 957 elif not dest_path.startswith(provider.mount_path + provider.share_path): 958 # Inter-realm copying not supported, since its not possible to 959 # authentication-wise 960 self._fail(HTTP_BAD_GATEWAY, "Inter-realm copy/move is not supported.") 961 962 dest_path = dest_path[len(provider.mount_path + provider.share_path) :] 963 assert dest_path.startswith("/") 964 965 # dest_path is now relative to current mount/share starting with '/' 966 967 dest_res = provider.get_resource_inst(dest_path, environ) 968 dest_exists = dest_res is not None 969 970 dest_parent_res = provider.get_resource_inst( 971 util.get_uri_parent(dest_path), environ 972 ) 973 974 if not dest_parent_res or not dest_parent_res.is_collection: 975 self._fail(HTTP_CONFLICT, "Destination parent must be a collection.") 976 977 self._evaluate_if_headers(src_res, environ) 978 self._evaluate_if_headers(dest_res, environ) 979 # Check permissions 980 # http://www.webdav.org/specs/rfc4918.html#rfc.section.7.4 981 if is_move: 982 self._check_write_permission(src_res, "infinity", environ) 983 # Cannot remove members from locked-0 collections 984 if src_parent_res: 985 self._check_write_permission(src_parent_res, "0", environ) 986 987 # Cannot create or new members in locked-0 collections 988 if not dest_exists: 989 self._check_write_permission(dest_parent_res, "0", environ) 990 # If target exists, it must not be locked 991 self._check_write_permission(dest_res, "infinity", environ) 992 993 if src_path == dest_path: 994 self._fail(HTTP_FORBIDDEN, "Cannot copy/move source onto itself") 995 elif util.is_equal_or_child_uri(src_path, dest_path): 996 self._fail(HTTP_FORBIDDEN, "Cannot copy/move source below itself") 997 998 if dest_exists and environ["HTTP_OVERWRITE"] != "T": 999 self._fail( 1000 HTTP_PRECONDITION_FAILED, 1001 "Destination already exists and Overwrite is set to false", 1002 ) 1003 1004 # --- Let provider handle the request natively ------------------------ 1005 1006 # Errors in copy/move; [ (<ref-url>, <DAVError>), ... ] 1007 error_list = [] 1008 success_code = HTTP_CREATED 1009 if dest_exists: 1010 success_code = HTTP_NO_CONTENT 1011 1012 try: 1013 if is_move: 1014 handled = src_res.handle_move(dest_path) 1015 else: 1016 isInfinity = environ["HTTP_DEPTH"] == "infinity" 1017 handled = src_res.handle_copy(dest_path, isInfinity) 1018 assert handled in (True, False) or type(handled) is list 1019 if type(handled) is list: 1020 error_list = handled 1021 handled = True 1022 except Exception as e: 1023 _debug_exception(e) 1024 error_list = [(src_res.get_href(), as_DAVError(e))] 1025 handled = True 1026 if handled: 1027 return self._send_response( 1028 environ, start_response, src_res, HTTP_NO_CONTENT, error_list 1029 ) 1030 1031 # --- Cleanup destination before copy/move ---------------------------- 1032 1033 src_list = src_res.get_descendants(add_self=True) 1034 1035 src_root_len = len(src_path) 1036 dest_root_len = len(dest_path) 1037 1038 if dest_exists: 1039 if is_move or not dest_res.is_collection or not src_res.is_collection: 1040 # MOVE: 1041 # If a resource exists at the destination and the Overwrite 1042 # header is "T", then prior to performing the move, the server 1043 # MUST perform a DELETE with "Depth: infinity" on the 1044 # destination resource. 1045 _logger.debug("Remove dest before move: '{}'".format(dest_res)) 1046 dest_res.delete() 1047 dest_res = None 1048 else: 1049 # COPY collection over collection: 1050 # Remove destination files, that are not part of source, because 1051 # source and dest collections must not be merged (9.8.4). 1052 # This is not the same as deleting the complete dest collection 1053 # before copying, because that would also discard the history of 1054 # existing resources. 1055 reverse_dest_list = dest_res.get_descendants( 1056 depth_first=True, add_self=False 1057 ) 1058 src_path_list = [s.path for s in src_list] 1059 _logger.debug("check src_path_list: {}".format(src_path_list)) 1060 for dres in reverse_dest_list: 1061 _logger.debug("check unmatched dest before copy: {}".format(dres)) 1062 rel_url = dres.path[dest_root_len:] 1063 sp = src_path + rel_url 1064 if sp not in src_path_list: 1065 _logger.debug( 1066 "Remove unmatched dest before copy: {}".format(dres) 1067 ) 1068 dres.delete() 1069 1070 # --- Let provider implement recursive move --------------------------- 1071 # We do this only, if the provider supports it, and no conflicts exist. 1072 # A provider can implement this very efficiently, without allocating 1073 # double memory as a copy/delete approach would. 1074 1075 if is_move and src_res.support_recursive_move(dest_path): 1076 has_conflicts = False 1077 for s in src_list: 1078 try: 1079 self._evaluate_if_headers(s, environ) 1080 except Exception as e: 1081 _debug_exception(e) 1082 has_conflicts = True 1083 break 1084 1085 if not has_conflicts: 1086 try: 1087 _logger.debug( 1088 "Recursive move: {} -> '{}'".format(src_res, dest_path) 1089 ) 1090 error_list = src_res.move_recursive(dest_path) 1091 except Exception as e: 1092 _debug_exception(e) 1093 error_list = [(src_res.get_href(), as_DAVError(e))] 1094 1095 return self._send_response( 1096 environ, start_response, src_res, success_code, error_list 1097 ) 1098 1099 # --- Copy/move file-by-file using copy/delete ------------------------ 1100 1101 # We get here, if 1102 # - the provider does not support recursive moves 1103 # - this is a copy request 1104 # In this case we would probably not win too much by a native provider 1105 # implementation, since we had to handle single child errors anyway. 1106 # - the source tree is partially locked 1107 # We would have to pass this information to the native provider. 1108 1109 # Hidden paths (paths of failed copy/moves) {<src_path>: True, ...} 1110 ignore_dict = {} 1111 1112 for sres in src_list: 1113 # Skip this resource, if there was a failure copying a parent 1114 parent_error = False 1115 for ignorePath in ignore_dict.keys(): 1116 if util.is_equal_or_child_uri(ignorePath, sres.path): 1117 parent_error = True 1118 break 1119 if parent_error: 1120 _logger.debug( 1121 "Copy: skipping '{}', because of parent error".format(sres.path) 1122 ) 1123 continue 1124 1125 try: 1126 rel_url = sres.path[src_root_len:] 1127 dpath = dest_path + rel_url 1128 1129 self._evaluate_if_headers(sres, environ) 1130 1131 # We copy resources and their properties top-down. 1132 # Collections are simply created (without members), for 1133 # non-collections bytes are copied (overwriting target) 1134 sres.copy_move_single(dpath, is_move) 1135 1136 # If copy succeeded, and it was a non-collection delete it now. 1137 # So the source tree shrinks while the destination grows and we 1138 # don't have to allocate the memory twice. 1139 # We cannot remove collections here, because we have not yet 1140 # copied all children. 1141 if is_move and not sres.is_collection: 1142 sres.delete() 1143 1144 except Exception as e: 1145 _debug_exception(e) 1146 ignore_dict[sres.path] = True 1147 # TODO: the error-href should be 'most appropriate of the source 1148 # and destination URLs'. So maybe this should be the destination 1149 # href sometimes. 1150 # http://www.webdav.org/specs/rfc4918.html#rfc.section.9.8.5 1151 error_list.append((sres.get_href(), as_DAVError(e))) 1152 1153 # MOVE: Remove source tree (bottom-up) 1154 if is_move: 1155 reverse_src_list = src_list[:] 1156 reverse_src_list.reverse() 1157 _logger.debug("Delete after move, ignore_dict={}".format(ignore_dict)) 1158 for sres in reverse_src_list: 1159 # Non-collections have already been removed in the copy loop. 1160 if not sres.is_collection: 1161 continue 1162 # Skip collections that contain errors (unmoved resources) 1163 child_error = False 1164 for ignorePath in ignore_dict.keys(): 1165 if util.is_equal_or_child_uri(sres.path, ignorePath): 1166 child_error = True 1167 break 1168 if child_error: 1169 _logger.debug( 1170 "Delete after move: skipping '{}', because of child error".format( 1171 sres.path 1172 ) 1173 ) 1174 continue 1175 1176 try: 1177 _logger.debug("Remove collection after move: {}".format(sres)) 1178 sres.delete() 1179 except Exception as e: 1180 _debug_exception(e) 1181 error_list.append((src_res.get_href(), as_DAVError(e))) 1182 1183 _logger.debug("ErrorList: {}".format(error_list)) 1184 1185 # --- Return response ------------------------------------------------- 1186 1187 return self._send_response( 1188 environ, start_response, src_res, success_code, error_list 1189 ) 1190 1191 def do_LOCK(self, environ, start_response): 1192 """ 1193 @see: http://www.webdav.org/specs/rfc4918.html#METHOD_LOCK 1194 """ 1195 path = environ["PATH_INFO"] 1196 provider = self._davProvider 1197 res = provider.get_resource_inst(path, environ) 1198 lock_man = provider.lock_manager 1199 1200 if lock_man is None: 1201 # http://www.webdav.org/specs/rfc4918.html#rfc.section.6.3 1202 self._fail(HTTP_NOT_IMPLEMENTED, "This realm does not support locking.") 1203 if res and res.prevent_locking(): 1204 self._fail(HTTP_FORBIDDEN, "This resource does not support locking.") 1205 1206 if environ.setdefault("HTTP_DEPTH", "infinity") not in ("0", "infinity"): 1207 self._fail(HTTP_BAD_REQUEST, "Expected Depth: 'infinity' or '0'.") 1208 1209 self._evaluate_if_headers(res, environ) 1210 1211 timeout_secs = util.read_timeout_value_header(environ.get("HTTP_TIMEOUT", "")) 1212 submitted_token_list = environ["wsgidav.ifLockTokenList"] 1213 1214 lockinfo_el = util.parse_xml_body(environ, allow_empty=True) 1215 1216 # --- Special case: empty request body -------------------------------- 1217 1218 if lockinfo_el is None: 1219 # TODO: @see 9.10.2 1220 # TODO: 'URL of a resource within the scope of the lock' 1221 # Other (shared) locks are unaffected and don't prevent refreshing 1222 # TODO: check for valid user 1223 # TODO: check for If with single lock token 1224 environ["HTTP_DEPTH"] = "0" # MUST ignore depth header on refresh 1225 1226 if res is None: 1227 self._fail( 1228 HTTP_BAD_REQUEST, "LOCK refresh must specify an existing resource." 1229 ) 1230 if len(submitted_token_list) != 1: 1231 self._fail( 1232 HTTP_BAD_REQUEST, 1233 "Expected a lock token (only one lock may be refreshed at a time).", 1234 ) 1235 elif not lock_man.is_url_locked_by_token( 1236 res.get_ref_url(), submitted_token_list[0] 1237 ): 1238 self._fail( 1239 HTTP_PRECONDITION_FAILED, 1240 "Lock token does not match URL.", 1241 err_condition=PRECONDITION_CODE_LockTokenMismatch, 1242 ) 1243 # TODO: test, if token is owned by user 1244 1245 lock = lock_man.refresh(submitted_token_list[0], timeout_secs) 1246 1247 # The lock root may be <path>, or a parent of <path>. 1248 lock_path = provider.ref_url_to_path(lock["root"]) 1249 lock_res = provider.get_resource_inst(lock_path, environ) 1250 1251 prop_el = xml_tools.make_prop_el() 1252 # TODO: handle exceptions in get_property_value 1253 lockdiscovery_el = lock_res.get_property_value("{DAV:}lockdiscovery") 1254 prop_el.append(lockdiscovery_el) 1255 1256 # Lock-Token header is not returned 1257 xml = xml_tools.xml_to_bytes(prop_el) 1258 start_response( 1259 "200 OK", 1260 [ 1261 ("Content-Type", "application/xml"), 1262 ("Content-Length", str(len(xml))), 1263 ("Date", util.get_rfc1123_time()), 1264 ], 1265 ) 1266 return [xml] 1267 1268 # --- Standard case: parse xml body ----------------------------------- 1269 1270 if lockinfo_el.tag != "{DAV:}lockinfo": 1271 self._fail(HTTP_BAD_REQUEST) 1272 1273 lock_type = None 1274 lock_scope = None 1275 lock_owner = compat.to_bytes("") 1276 lock_depth = environ.setdefault("HTTP_DEPTH", "infinity") 1277 1278 for linode in lockinfo_el: 1279 if linode.tag == "{DAV:}lockscope": 1280 for lsnode in linode: 1281 if lsnode.tag == "{DAV:}exclusive": 1282 lock_scope = "exclusive" 1283 elif lsnode.tag == "{DAV:}shared": 1284 lock_scope = "shared" 1285 break 1286 elif linode.tag == "{DAV:}locktype": 1287 for ltnode in linode: 1288 if ltnode.tag == "{DAV:}write": 1289 lock_type = "write" # only type accepted 1290 break 1291 1292 elif linode.tag == "{DAV:}owner": 1293 # Store whole <owner> tag, so we can use etree.XML() later 1294 lock_owner = xml_tools.xml_to_bytes(linode, pretty_print=False) 1295 1296 else: 1297 self._fail(HTTP_BAD_REQUEST, "Invalid node '{}'.".format(linode.tag)) 1298 1299 if not lock_scope: 1300 self._fail(HTTP_BAD_REQUEST, "Missing or invalid lockscope.") 1301 if not lock_type: 1302 self._fail(HTTP_BAD_REQUEST, "Missing or invalid locktype.") 1303 1304 if environ.get("wsgidav.debug_break"): 1305 pass # break point 1306 1307 # TODO: check for locked parents BEFORE creating an empty child 1308 1309 # http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.4 1310 # Locking unmapped URLs: must create an empty resource 1311 createdNewResource = False 1312 if res is None: 1313 parentRes = provider.get_resource_inst(util.get_uri_parent(path), environ) 1314 if not parentRes or not parentRes.is_collection: 1315 self._fail(HTTP_CONFLICT, "LOCK-0 parent must be a collection") 1316 res = parentRes.create_empty_resource(util.get_uri_name(path)) 1317 createdNewResource = True 1318 1319 # --- Check, if path is already locked -------------------------------- 1320 1321 # May raise DAVError(HTTP_LOCKED): 1322 lock = lock_man.acquire( 1323 res.get_ref_url(), 1324 lock_type, 1325 lock_scope, 1326 lock_depth, 1327 lock_owner, 1328 timeout_secs, 1329 environ["wsgidav.user_name"], 1330 submitted_token_list, 1331 ) 1332 1333 # Lock succeeded 1334 prop_el = xml_tools.make_prop_el() 1335 # TODO: handle exceptions in get_property_value 1336 lockdiscovery_el = res.get_property_value("{DAV:}lockdiscovery") 1337 prop_el.append(lockdiscovery_el) 1338 1339 respcode = "200 OK" 1340 if createdNewResource: 1341 respcode = "201 Created" 1342 1343 xml = xml_tools.xml_to_bytes(prop_el) 1344 start_response( 1345 respcode, 1346 [ 1347 ("Content-Type", "application/xml"), 1348 ("Content-Length", str(len(xml))), 1349 ("Lock-Token", lock["token"]), 1350 ("Date", util.get_rfc1123_time()), 1351 ], 1352 ) 1353 return [xml] 1354 1355 # TODO: LOCK may also fail with HTTP_FORBIDDEN. 1356 # In this case we should return 207 Multi-Status. 1357 # http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.9 1358 # Checking this would require to call res.prevent_locking() 1359 # recursively. 1360 1361 # # --- Locking FAILED: return fault response 1362 # if len(conflictList) == 1 and conflictList[0][0]["root"] == res.get_ref_url(): 1363 # # If there is only one error for the root URL, send as simple error response 1364 # return util.send_status_response(environ, start_response, conflictList[0][1]) 1365 # 1366 # dictStatus = {} 1367 # 1368 # for lock_dict, e in conflictList: 1369 # dictStatus[lock_dict["root"]] = e 1370 # 1371 # if not res.get_ref_url() in dictStatus: 1372 # dictStatus[res.get_ref_url()] = DAVError(HTTP_FAILED_DEPENDENCY) 1373 # 1374 # # Return multi-status fault response 1375 # multistatusEL = xml_tools.make_multistatus_el() 1376 # for nu, e in dictStatus.items(): 1377 # responseEL = etree.SubElement(multistatusEL, "{DAV:}response") 1378 # etree.SubElement(responseEL, "{DAV:}href").text = nu 1379 # etree.SubElement(responseEL, "{DAV:}status").text = "HTTP/1.1 %s" % 1380 # get_http_status_string(e) 1381 # # TODO: all responses should have this(?): 1382 # if e.context_info: 1383 # etree.SubElement(multistatusEL, "{DAV:}responsedescription").text = e.context_info 1384 # 1385 # if responsedescription: 1386 # etree.SubElement(multistatusEL, "{DAV:}responsedescription").text = "\n".join( 1387 # responsedescription) 1388 # 1389 # return util.send_multi_status_response(environ, start_response, 1390 # multistatusEL) 1391 1392 def do_UNLOCK(self, environ, start_response): 1393 """ 1394 @see: http://www.webdav.org/specs/rfc4918.html#METHOD_UNLOCK 1395 """ 1396 path = environ["PATH_INFO"] 1397 provider = self._davProvider 1398 res = self._davProvider.get_resource_inst(path, environ) 1399 1400 lock_man = provider.lock_manager 1401 if lock_man is None: 1402 self._fail(HTTP_NOT_IMPLEMENTED, "This share does not support locking.") 1403 elif util.get_content_length(environ) != 0: 1404 self._fail( 1405 HTTP_MEDIATYPE_NOT_SUPPORTED, 1406 "The server does not handle any body content.", 1407 ) 1408 elif res is None: 1409 self._fail(HTTP_NOT_FOUND, path) 1410 elif "HTTP_LOCK_TOKEN" not in environ: 1411 self._fail(HTTP_BAD_REQUEST, "Missing lock token.") 1412 1413 self._evaluate_if_headers(res, environ) 1414 1415 lock_token = environ["HTTP_LOCK_TOKEN"].strip("<>") 1416 ref_url = res.get_ref_url() 1417 1418 if not lock_man.is_url_locked_by_token(ref_url, lock_token): 1419 self._fail( 1420 HTTP_CONFLICT, 1421 "Resource is not locked by token.", 1422 err_condition=PRECONDITION_CODE_LockTokenMismatch, 1423 ) 1424 1425 if not lock_man.is_token_locked_by_user( 1426 lock_token, environ["wsgidav.user_name"] 1427 ): 1428 # TODO: there must be a way to allow this for admins. 1429 # Maybe test for "remove_locks" in environ["wsgidav.roles"] 1430 self._fail(HTTP_FORBIDDEN, "Token was created by another user.") 1431 1432 # TODO: Is this correct?: unlock(a/b/c) will remove Lock for 'a/b' 1433 lock_man.release(lock_token) 1434 1435 return util.send_status_response(environ, start_response, HTTP_NO_CONTENT) 1436 1437 def do_OPTIONS(self, environ, start_response): 1438 """ 1439 @see http://www.webdav.org/specs/rfc4918.html#HEADER_DAV 1440 """ 1441 path = environ["PATH_INFO"] 1442 provider = self._davProvider 1443 res = provider.get_resource_inst(path, environ) 1444 1445 dav_compliance_level = "1,2" 1446 if provider is None or provider.is_readonly() or provider.lock_manager is None: 1447 dav_compliance_level = "1" 1448 1449 headers = [ 1450 ("Content-Type", "text/html"), 1451 ("Content-Length", "0"), 1452 ("DAV", dav_compliance_level), 1453 ("Date", util.get_rfc1123_time()), 1454 ] 1455 1456 if path == "/": 1457 path = "*" # Hotfix for WinXP 1458 1459 if path == "*": 1460 # Answer HTTP 'OPTIONS' method on server-level. 1461 # From RFC 2616 1462 # If the Request-URI is an asterisk ("*"), the OPTIONS request is 1463 # intended to apply to the server in general rather than to a specific 1464 # resource. Since a server's communication options typically depend on 1465 # the resource, the "*" request is only useful as a "ping" or "no-op" 1466 # type of method; it does nothing beyond allowing the client to test the 1467 # capabilities of the server. For example, this can be used to test a 1468 # proxy for HTTP/1.1 compliance (or lack thereof). 1469 start_response("200 OK", headers) 1470 return [b""] 1471 1472 # Determine allowed request methods 1473 allow = ["OPTIONS"] 1474 if res and res.is_collection: 1475 # Existing collection 1476 allow.extend(["HEAD", "GET", "PROPFIND"]) 1477 # if provider.prop_manager is not None: 1478 # allow.extend( [ "PROPFIND" ] ) 1479 if not provider.is_readonly(): 1480 allow.extend(["DELETE", "COPY", "MOVE", "PROPPATCH"]) 1481 # if provider.prop_manager is not None: 1482 # allow.extend( [ "PROPPATCH" ] ) 1483 if provider.lock_manager is not None: 1484 allow.extend(["LOCK", "UNLOCK"]) 1485 elif res: 1486 # Existing resource 1487 allow.extend(["HEAD", "GET", "PROPFIND"]) 1488 # if provider.prop_manager is not None: 1489 # allow.extend( [ "PROPFIND" ] ) 1490 if not provider.is_readonly(): 1491 allow.extend(["PUT", "DELETE", "COPY", "MOVE", "PROPPATCH"]) 1492 # if provider.prop_manager is not None: 1493 # allow.extend( [ "PROPPATCH" ] ) 1494 if provider.lock_manager is not None: 1495 allow.extend(["LOCK", "UNLOCK"]) 1496 if res.support_ranges(): 1497 headers.append(("Accept-Ranges", "bytes")) 1498 elif provider.is_collection(util.get_uri_parent(path), environ): 1499 # A new resource below an existing collection 1500 # TODO: should we allow LOCK here? I think it is allowed to lock an 1501 # non-existing resource 1502 if not provider.is_readonly(): 1503 allow.extend(["PUT", "MKCOL"]) 1504 else: 1505 self._fail(HTTP_NOT_FOUND, path) 1506 1507 headers.append(("Allow", ", ".join(allow))) 1508 1509 if environ["wsgidav.config"].get("add_header_MS_Author_Via", False): 1510 headers.append(("MS-Author-Via", "DAV")) 1511 1512 start_response("200 OK", headers) 1513 return [b""] 1514 1515 def do_GET(self, environ, start_response): 1516 return self._send_resource(environ, start_response, is_head_method=False) 1517 1518 def do_HEAD(self, environ, start_response): 1519 return self._send_resource(environ, start_response, is_head_method=True) 1520 1521 def _send_resource(self, environ, start_response, is_head_method): 1522 """ 1523 If-Range 1524 If the entity is unchanged, send me the part(s) that I am missing; 1525 otherwise, send me the entire new entity 1526 If-Range: "737060cd8c284d8af7ad3082f209582d" 1527 1528 @see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.27 1529 """ 1530 path = environ["PATH_INFO"] 1531 res = self._davProvider.get_resource_inst(path, environ) 1532 1533 if util.get_content_length(environ) != 0: 1534 self._fail( 1535 HTTP_MEDIATYPE_NOT_SUPPORTED, 1536 "The server does not handle any body content.", 1537 ) 1538 elif environ.setdefault("HTTP_DEPTH", "0") != "0": 1539 self._fail(HTTP_BAD_REQUEST, "Only Depth: 0 supported.") 1540 elif res is None: 1541 self._fail(HTTP_NOT_FOUND, path) 1542 elif res.is_collection: 1543 self._fail( 1544 HTTP_FORBIDDEN, 1545 "Directory browsing is not enabled." 1546 "(to enable it put WsgiDavDirBrowser into middleware_stack" 1547 "option and set dir_browser -> enabled = True option.)", 1548 ) 1549 1550 self._evaluate_if_headers(res, environ) 1551 1552 filesize = res.get_content_length() 1553 if filesize is None: 1554 filesize = -1 # flag logic to read until EOF 1555 1556 last_modified = res.get_last_modified() 1557 if last_modified is None: 1558 last_modified = -1 1559 1560 entitytag = res.get_etag() 1561 if entitytag is None: 1562 entitytag = "[]" 1563 1564 # Ranges 1565 doignoreranges = ( 1566 not res.support_content_length() 1567 or not res.support_ranges() 1568 or filesize == 0 1569 ) 1570 if ( 1571 "HTTP_RANGE" in environ 1572 and "HTTP_IF_RANGE" in environ 1573 and not doignoreranges 1574 ): 1575 ifrange = environ["HTTP_IF_RANGE"] 1576 # Try as http-date first (Return None, if invalid date string) 1577 secstime = util.parse_time_string(ifrange) 1578 if secstime: 1579 # cast to integer, as last_modified may be a floating point number 1580 if int(last_modified) != secstime: 1581 doignoreranges = True 1582 else: 1583 # Use as entity tag 1584 ifrange = ifrange.strip('" ') 1585 if entitytag is None or ifrange != entitytag: 1586 doignoreranges = True 1587 1588 ispartialranges = False 1589 if "HTTP_RANGE" in environ and not doignoreranges: 1590 ispartialranges = True 1591 list_ranges, _totallength = util.obtain_content_ranges( 1592 environ["HTTP_RANGE"], filesize 1593 ) 1594 if len(list_ranges) == 0: 1595 # No valid ranges present 1596 self._fail(HTTP_RANGE_NOT_SATISFIABLE) 1597 1598 # More than one range present -> take only the first range, since 1599 # multiple range returns require multipart, which is not supported 1600 # obtain_content_ranges supports more than one range in case the above 1601 # behaviour changes in future 1602 (range_start, range_end, range_length) = list_ranges[0] 1603 else: 1604 (range_start, range_end, range_length) = (0, filesize - 1, filesize) 1605 1606 # Content Processing 1607 mimetype = res.get_content_type() # provider.get_content_type(path) 1608 1609 response_headers = [] 1610 if res.support_content_length(): 1611 # Content-length must be of type string 1612 response_headers.append(("Content-Length", str(range_length))) 1613 if res.support_modified(): 1614 response_headers.append( 1615 ("Last-Modified", util.get_rfc1123_time(last_modified)) 1616 ) 1617 response_headers.append(("Content-Type", mimetype)) 1618 response_headers.append(("Date", util.get_rfc1123_time())) 1619 if res.support_etag(): 1620 response_headers.append(("ETag", '"{}"'.format(entitytag))) 1621 1622 if res.support_ranges(): 1623 response_headers.append(("Accept-Ranges", "bytes")) 1624 1625 if "response_headers" in environ["wsgidav.config"]: 1626 customHeaders = environ["wsgidav.config"]["response_headers"] 1627 for header, value in customHeaders: 1628 response_headers.append((header, value)) 1629 1630 res.finalize_headers(environ, response_headers) 1631 1632 if ispartialranges: 1633 # response_headers.append(("Content-Ranges", "bytes " + str(range_start) + "-" + 1634 # str(range_end) + "/" + str(range_length))) 1635 response_headers.append( 1636 ( 1637 "Content-Range", 1638 "bytes {}-{}/{}".format(range_start, range_end, filesize), 1639 ) 1640 ) 1641 start_response("206 Partial Content", response_headers) 1642 else: 1643 start_response("200 OK", response_headers) 1644 1645 # Return empty body for HEAD requests 1646 if is_head_method: 1647 yield b"" 1648 return 1649 1650 fileobj = res.get_content() 1651 1652 if not doignoreranges: 1653 fileobj.seek(range_start) 1654 1655 contentlengthremaining = range_length 1656 try: 1657 while 1: 1658 if ( 1659 contentlengthremaining < 0 1660 or contentlengthremaining > self.block_size 1661 ): 1662 readbuffer = fileobj.read(self.block_size) 1663 else: 1664 readbuffer = fileobj.read(contentlengthremaining) 1665 assert compat.is_bytes(readbuffer) 1666 yield readbuffer 1667 contentlengthremaining -= len(readbuffer) 1668 if len(readbuffer) == 0 or contentlengthremaining == 0: 1669 break 1670 finally: 1671 # yield readbuffer MAY fail with a GeneratorExit error 1672 # we still need to close the file 1673 fileobj.close() 1674 return 1675 1676 1677# def do_TRACE(self, environ, start_response): 1678# """ TODO: TRACE pending, but not essential.""" 1679# self._fail(HTTP_NOT_IMPLEMENTED) 1680