Damien Bowden: IdentityServer4, WebAPI and Angular2 in a single ASP.NET Core project

This article shows how IdentityServer4 with ASP.NET Core Identity, a data Web API, and an Angular 2 SPA can be set up in a single ASP.NET Core project. The application uses the OpenID Connect Implicit Flow with reference tokens to access the API. The Angular 2 application is built using webpack.

Code: https://github.com/damienbod/AspNet5IdentityServerAngularImplicitFlow

Other posts in this series:

Step 1: Create app and add IdentityServer4

Use the Quickstart6 AspNetIdentity template from IdentityServer4 to set up the application, then edit the project.json file to add the packages you require. I added the Microsoft.AspNetCore.Authentication.JwtBearer package and the IdentityServer4.AccessTokenValidation package. The buildOptions also have to be extended to exclude the node_modules folder from compilation.

{
  "userSecretsId": "aspnet-IdentityServerWithAspNetIdentity-1e7bf5d8-6c32-4dd3-b77d-2d7d2e0f5099",

    "dependencies": {
        "IdentityServer4": "1.0.0-rc1-update2",
        "IdentityServer4.AspNetIdentity": "1.0.0-rc1-update2",

        "Microsoft.NETCore.App": {
            "version": "1.0.1",
            "type": "platform"
        },
        "Microsoft.AspNetCore.Authentication.Cookies": "1.0.0",
        "Microsoft.AspNetCore.Diagnostics": "1.0.0",
        "Microsoft.AspNetCore.Diagnostics.EntityFrameworkCore": "1.0.0",
        "Microsoft.AspNetCore.Identity.EntityFrameworkCore": "1.0.0",
        "Microsoft.AspNetCore.Mvc": "1.0.0",
        "Microsoft.AspNetCore.Razor.Tools": {
            "version": "1.0.0-preview2-final",
            "type": "build"
        },
        "Microsoft.AspNetCore.Server.IISIntegration": "1.0.0",
        "Microsoft.AspNetCore.Server.Kestrel": "1.0.0",
        "Microsoft.AspNetCore.StaticFiles": "1.0.0",
        "Microsoft.EntityFrameworkCore.Sqlite": "1.0.0",
        "Microsoft.EntityFrameworkCore.Sqlite.Design": {
            "version": "1.0.0",
            "type": "build"
        },
        "Microsoft.EntityFrameworkCore.Tools": {
            "version": "1.0.0-preview2-final",
            "type": "build"
        },
        "Microsoft.Extensions.Configuration.EnvironmentVariables": "1.0.0",
        "Microsoft.Extensions.Configuration.Json": "1.0.0",
        "Microsoft.Extensions.Configuration.UserSecrets": "1.0.0",
        "Microsoft.Extensions.Logging": "1.0.0",
        "Microsoft.Extensions.Logging.Console": "1.0.0",
        "Microsoft.Extensions.Logging.Debug": "1.0.0",
        "Microsoft.Extensions.Options.ConfigurationExtensions": "1.0.0",
        "Microsoft.VisualStudio.Web.BrowserLink.Loader": "14.0.0",
        "Microsoft.VisualStudio.Web.CodeGeneration.Tools": {
            "version": "1.0.0-preview2-final",
            "type": "build"
        },
        "Microsoft.VisualStudio.Web.CodeGenerators.Mvc": {
            "version": "1.0.0-preview2-final",
            "type": "build"
        },
        "Microsoft.AspNetCore.Authentication.JwtBearer": "1.0.0",
        "IdentityServer4.AccessTokenValidation": "1.0.1-rc1"
    },

  "tools": {
    "BundlerMinifier.Core": "2.0.238",
    "Microsoft.AspNetCore.Razor.Tools": "1.0.0-preview2-final",
    "Microsoft.AspNetCore.Server.IISIntegration.Tools": "1.0.0-preview2-final",
    "Microsoft.EntityFrameworkCore.Tools": "1.0.0-preview2-final",
    "Microsoft.Extensions.SecretManager.Tools": "1.0.0-preview2-final",
    "Microsoft.VisualStudio.Web.CodeGeneration.Tools": {
      "version": "1.0.0-preview2-final",
      "imports": [
        "portable-net45+win8"
      ]
    }
  },

  "frameworks": {
    "netcoreapp1.0": {
      "imports": [
        "dotnet5.6",
        "portable-net45+win8"
      ]
    }
  },

  "buildOptions": {
    "emitEntryPoint": true,
    "preserveCompilationContext": true,
    "compile": {
        "exclude": [ "node_modules" ]
    }
  },

  "runtimeOptions": {
    "configProperties": {
      "System.GC.Server": true
    }
  },

  "publishOptions": {
    "include": [
      "wwwroot",
      "Views",
      "Areas/**/Views",
      "appsettings.json",
      "web.config"
    ]
  },

  "scripts": {
    "prepublish": [ "bower install", "dotnet bundle" ],
    "postpublish": [ "dotnet publish-iis --publish-folder %publish:OutputPath% --framework %publish:FullTargetFramework%" ]
  }
}

The IProfileService interface is implemented to add your user claims to the tokens. In this example, the IdentityWithAdditionalClaimsProfileService class implements IProfileService and is registered with the services in the Startup class.

using System;
using System.Collections.Generic;
using System.Linq;
using System.Security.Claims;
using System.Threading.Tasks;
using IdentityModel;
using IdentityServer4.Extensions;
using IdentityServer4.Models;
using IdentityServer4.Services;
using IdentityServerWithAspNetIdentity.Models;
using Microsoft.AspNetCore.Identity;

namespace ResourceWithIdentityServerWithClient
{
    public class IdentityWithAdditionalClaimsProfileService : IProfileService
    {
        private readonly IUserClaimsPrincipalFactory<ApplicationUser> _claimsFactory;
        private readonly UserManager<ApplicationUser> _userManager;

        public IdentityWithAdditionalClaimsProfileService(UserManager<ApplicationUser> userManager,  IUserClaimsPrincipalFactory<ApplicationUser> claimsFactory)
        {
            _userManager = userManager;
            _claimsFactory = claimsFactory;
        }

        public async Task GetProfileDataAsync(ProfileDataRequestContext context)
        {
            var sub = context.Subject.GetSubjectId();

            var user = await _userManager.FindByIdAsync(sub);
            var principal = await _claimsFactory.CreateAsync(user);

            var claims = principal.Claims.ToList();
            if (!context.AllClaimsRequested)
            {
                claims = claims.Where(claim => context.RequestedClaimTypes.Contains(claim.Type)).ToList();
            }

            claims.Add(new Claim(JwtClaimTypes.GivenName, user.UserName));
            //new Claim(JwtClaimTypes.Role, "admin"),
            //new Claim(JwtClaimTypes.Role, "dataEventRecords.admin"),
            //new Claim(JwtClaimTypes.Role, "dataEventRecords.user"),
            //new Claim(JwtClaimTypes.Role, "dataEventRecords"),
            //new Claim(JwtClaimTypes.Role, "securedFiles.user"),
            //new Claim(JwtClaimTypes.Role, "securedFiles.admin"),
            //new Claim(JwtClaimTypes.Role, "securedFiles")

            if (user.IsAdmin)
            {
                claims.Add(new Claim(JwtClaimTypes.Role, "admin"));
            }
            else
            {
                claims.Add(new Claim(JwtClaimTypes.Role, "user"));
            }

            if (user.DataEventRecordsRole == "dataEventRecords.admin")
            {
                claims.Add(new Claim(JwtClaimTypes.Role, "dataEventRecords.admin"));
                claims.Add(new Claim(JwtClaimTypes.Role, "dataEventRecords.user"));
                claims.Add(new Claim(JwtClaimTypes.Role, "dataEventRecords"));
            }
            else
            {
                claims.Add(new Claim(JwtClaimTypes.Role, "dataEventRecords.user"));
                claims.Add(new Claim(JwtClaimTypes.Role, "dataEventRecords"));
            }

            if (user.SecuredFilesRole == "securedFiles.admin")
            {
                claims.Add(new Claim(JwtClaimTypes.Role, "securedFiles.admin"));
                claims.Add(new Claim(JwtClaimTypes.Role, "securedFiles.user"));
                claims.Add(new Claim(JwtClaimTypes.Role, "securedFiles"));
            }
            else
            {
                claims.Add(new Claim(JwtClaimTypes.Role, "securedFiles.user"));
                claims.Add(new Claim(JwtClaimTypes.Role, "securedFiles"));
            }

            claims.Add(new System.Security.Claims.Claim(StandardScopes.Email.Name, user.Email));
            

            context.IssuedClaims = claims;
        }

        public async Task IsActiveAsync(IsActiveContext context)
        {
            var sub = context.Subject.GetSubjectId();
            var user = await _userManager.FindByIdAsync(sub);
            context.IsActive = user != null;
        }
    }
}

Step 2: Add the Web API for the resource data

The MVC controller DataEventRecordsController is used for the CRUD API requests. This is just a dummy implementation; in a real application I would implement all resource server logic in a separate project. The Authorize attribute is used both with and without policies. The policies are configured in the Startup class, and a sketch of the DataEventRecord model follows the controller code below.

using ResourceWithIdentityServerWithClient.Model;

using Microsoft.AspNetCore.Authorization;
using Microsoft.AspNetCore.Mvc;
using System.Collections.Generic;
using System;

namespace ResourceWithIdentityServerWithClient.Controllers
{
    [Authorize]
    [Route("api/[controller]")]
    public class DataEventRecordsController : Controller
    {
        [Authorize("dataEventRecordsUser")]
        [HttpGet]
        public IActionResult Get()
        {
            return Ok(new List<DataEventRecord> { new DataEventRecord { Id =1, Description= "Fake", Name="myname", Timestamp= DateTime.UtcNow } });
        }

        [Authorize("dataEventRecordsAdmin")]
        [HttpGet("{id}")]
        public IActionResult Get(long id)
        {
            return Ok(new DataEventRecord { Id = 1, Description = "Fake", Name = "myname", Timestamp = DateTime.UtcNow });
        }

        [Authorize("dataEventRecordsAdmin")]
        [HttpPost]
        public void Post([FromBody]DataEventRecord value)
        {
            
        }

        [Authorize("dataEventRecordsAdmin")]
        [HttpPut("{id}")]
        public void Put(long id, [FromBody]DataEventRecord value)
        {
            
        }

        [Authorize("dataEventRecordsAdmin")]
        [HttpDelete("{id}")]
        public void Delete(long id)
        {
            
        }
    }
}
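The DataEventRecord model referenced by the controller is not shown in the post. A minimal sketch, with the properties inferred from the controller code above, could look like this:

using System;

namespace ResourceWithIdentityServerWithClient.Model
{
    // Minimal sketch inferred from the controller usage above, not the original class.
    public class DataEventRecord
    {
        public long Id { get; set; }
        public string Name { get; set; }
        public string Description { get; set; }
        public DateTime Timestamp { get; set; }
    }
}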

Step 3: Add the Angular 2 client

The Angular 2 client part of the application is set up as described in the ASP.NET Core, Angular2 with Webpack and Visual Studio article. Webpack is then used to build the client application.

Any SPA client which supports the OpenID Connect Implicit Flow can be used. IdentityServer4 (IdentityModel) also has good examples using the OIDC JavaScript client.

Step 4: Configure application host URL

The host URL is the same for both the client and the server. It is configured in the Config class as the static property HOST_URL and used throughout the server side of the application.

public class Config
{
    public static string HOST_URL = "https://localhost:44363";

    // ... client and scope configuration follows
}

The client application reads the configuration from the app.constants.ts provider.

import { Injectable } from '@angular/core';

@Injectable()
export class Configuration {
    public Server: string = "https://localhost:44363";
}

IIS Express is configured to run with HTTPS and matches these settings. If a different port is used, you need to change both of these configurations. In a production environment, these values should be configurable per deployment.

Step 5: Deactivate the consent view

The consent view is deactivated because this client is the only client using the data resource and always requires the same consent. To improve the user experience, the consent view is removed from the flow by setting the RequireConsent property to false in the client configuration.

public static IEnumerable<Client> GetClients()
{
	// implicit flow client for the SPA
	return new List<Client>
	{
		new Client
		{
			ClientName = "singleapp",
			ClientId = "singleapp",
			RequireConsent = false,
			AccessTokenType = AccessTokenType.Reference,
			//AccessTokenLifetime = 600, // 10 minutes, default 60 minutes
			AllowedGrantTypes = GrantTypes.Implicit,
			AllowAccessTokensViaBrowser = true,
			RedirectUris = new List<string>
			{
				HOST_URL

			},
			PostLogoutRedirectUris = new List<string>
			{
				HOST_URL + "/Unauthorized"
			},
			AllowedCorsOrigins = new List<string>
			{
				HOST_URL
			},
			AllowedScopes = new List<string>
			{
				"openid",
				"dataEventRecords"
			}
		}
	};
}
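The Startup class shown in Step 7 also calls Config.GetScopes(), which is not reproduced in this post. A rough sketch against the IdentityServer4 RC1 scope model (the exact property names changed in later releases) could look like the following, reusing the scope name and secret that appear in the token validation options:

public static IEnumerable<Scope> GetScopes()
{
    return new List<Scope>
    {
        StandardScopes.OpenId,
        StandardScopes.Email,
        new Scope
        {
            Name = "dataEventRecords",
            DisplayName = "Data Event Records",
            Type = ScopeType.Resource,
            // secret used by the API when validating the reference tokens
            ScopeSecrets = new List<Secret> { new Secret("dataEventRecordsSecret".Sha256()) },
            // include the role claims so the authorization policies can evaluate them
            Claims = new List<ScopeClaim> { new ScopeClaim("role") }
        }
    };
}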

Step 6: Deactivate logout screens

When the Angular 2 client requests a logout, the client is logged out, reference tokens are invalidated for this application and user, and the user is redirected back to the Angular 2 application without the server account logout views. This improves the user experience.

The two existing Logout action methods are removed from the AccountController and the following is implemented instead. The controller requires the IPersistedGrantService to remove the reference tokens (a sketch of the required constructor injection follows the code below).

/// <summary>
/// special logout to skip logout screens
/// </summary>
/// <param name="logoutId"></param>
/// <returns></returns>
[HttpGet]
public async Task<IActionResult> Logout(string logoutId)
{
	var user = HttpContext.User.Identity.Name;
	var subjectId = HttpContext.User.Identity.GetSubjectId();

	// delete authentication cookie
	await HttpContext.Authentication.SignOutAsync();


	// set this so UI rendering sees an anonymous user
	HttpContext.User = new ClaimsPrincipal(new ClaimsIdentity());

	// get context information (client name, post logout redirect URI and iframe for federated signout)
	var logout = await _interaction.GetLogoutContextAsync(logoutId);

	var vm = new LoggedOutViewModel
	{
		PostLogoutRedirectUri = logout?.PostLogoutRedirectUri,
		ClientName = logout?.ClientId,
		SignOutIframeUrl = logout?.SignOutIFrameUrl
	};


	await _persistedGrantService.RemoveAllGrantsAsync(subjectId, "singleapp");

	return Redirect(Config.HOST_URL + "/Unauthorized");
}
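The _interaction and _persistedGrantService fields used above come from constructor injection. A sketch of the relevant part of the AccountController (the remaining quickstart dependencies and actions are omitted):

using IdentityServer4.Services;
using Microsoft.AspNetCore.Mvc;

public class AccountController : Controller
{
    private readonly IIdentityServerInteractionService _interaction;
    private readonly IPersistedGrantService _persistedGrantService;

    public AccountController(
        IIdentityServerInteractionService interaction,
        IPersistedGrantService persistedGrantService)
    {
        _interaction = interaction;
        _persistedGrantService = persistedGrantService;
    }

    // ... Logout action from above, plus the other quickstart actions
}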

Step 7: Configure Startup to use all three application parts

The Startup class configures all three application parts to run together. The Angular 2 application requires that its client routes are routed on the client and not the server. Middleware is added so that the server does not handle the client routes.

The API service needs to validate the reference token on each request. Authorization policies are added for this, and the UseIdentityServerAuthentication extension method is used to validate the reference tokens for each request.

IdentityServer4 is set up to use Identity with a SQLite database.

using Microsoft.AspNetCore.Builder;
using Microsoft.AspNetCore.Hosting;
using Microsoft.AspNetCore.Identity.EntityFrameworkCore;
using Microsoft.EntityFrameworkCore;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Logging;
using IdentityServerWithAspNetIdentity.Data;
using IdentityServerWithAspNetIdentity.Models;
using IdentityServerWithAspNetIdentity.Services;
using QuickstartIdentityServer;
using IdentityServer4.Services;
using System.Security.Cryptography.X509Certificates;
using System.IO;
using System.Linq;
using Microsoft.AspNetCore.Http;
using System.Collections.Generic;
using System.IdentityModel.Tokens.Jwt;
using System;
using Microsoft.AspNetCore.Authorization;
using IdentityServer4.AccessTokenValidation;

namespace ResourceWithIdentityServerWithClient
{
    public class Startup
    {
        private readonly IHostingEnvironment _environment;

        public Startup(IHostingEnvironment env)
        {
            var builder = new ConfigurationBuilder()
                .SetBasePath(env.ContentRootPath)
                .AddJsonFile("appsettings.json", optional: true, reloadOnChange: true)
                .AddJsonFile($"appsettings.{env.EnvironmentName}.json", optional: true);

            if (env.IsDevelopment())
            {
                builder.AddUserSecrets();
            }

            _environment = env;

            builder.AddEnvironmentVariables();
            Configuration = builder.Build();
        }

        public IConfigurationRoot Configuration { get; }

        public void ConfigureServices(IServiceCollection services)
        {
            var cert = new X509Certificate2(Path.Combine(_environment.ContentRootPath, "damienbodserver.pfx"), "");

            services.AddDbContext<ApplicationDbContext>(options =>
                options.UseSqlite(Configuration.GetConnectionString("DefaultConnection")));

            services.AddIdentity<ApplicationUser, IdentityRole>()
            .AddEntityFrameworkStores<ApplicationDbContext>()
            .AddDefaultTokenProviders();

            var guestPolicy = new AuthorizationPolicyBuilder()
            .RequireAuthenticatedUser()
            .RequireClaim("scope", "dataEventRecords")
            .Build();

            services.AddAuthorization(options =>
            {
                options.AddPolicy("dataEventRecordsAdmin", policyAdmin =>
                {
                    policyAdmin.RequireClaim("role", "dataEventRecords.admin");
                });
                options.AddPolicy("dataEventRecordsUser", policyUser =>
                {
                    policyUser.RequireClaim("role", "dataEventRecords.user");
                });

            });

            services.AddMvc();

            services.AddTransient<IProfileService, IdentityWithAdditionalClaimsProfileService>();

            services.AddTransient<IEmailSender, AuthMessageSender>();
            services.AddTransient<ISmsSender, AuthMessageSender>();

            services.AddDeveloperIdentityServer()
                .SetSigningCredential(cert)
                .AddInMemoryScopes(Config.GetScopes())
                .AddInMemoryClients(Config.GetClients())
                .AddAspNetIdentity<ApplicationUser>()
                .AddProfileService<IdentityWithAdditionalClaimsProfileService>();
        }

        public void Configure(IApplicationBuilder app, IHostingEnvironment env, ILoggerFactory loggerFactory)
        {
            loggerFactory.AddConsole(Configuration.GetSection("Logging"));
            loggerFactory.AddDebug();

            var angularRoutes = new[] {
                "/Unauthorized",
                "/Forbidden",
                "/home",
                "/dataeventrecords/",
                "/dataeventrecords/create",
                "/dataeventrecords/edit/",
                "/dataeventrecords/list",
                };

            app.Use(async (context, next) =>
            {
                if (context.Request.Path.HasValue && null != angularRoutes.FirstOrDefault(
                    (ar) => context.Request.Path.Value.StartsWith(ar, StringComparison.OrdinalIgnoreCase)))
                {
                    context.Request.Path = new PathString("/");
                }

                await next();
            });

            app.UseDefaultFiles();

            if (env.IsDevelopment())
            {
                app.UseDeveloperExceptionPage();
                app.UseDatabaseErrorPage();
                app.UseBrowserLink();
            }
            else
            {
                app.UseExceptionHandler("/Home/Error");
            }

            app.UseIdentity();
            app.UseIdentityServer();

            app.UseStaticFiles();

            JwtSecurityTokenHandler.DefaultInboundClaimTypeMap.Clear();

            IdentityServerAuthenticationOptions identityServerValidationOptions = new IdentityServerAuthenticationOptions
            {
                Authority = Config.HOST_URL + "/",
                ScopeName = "dataEventRecords",
                ScopeSecret = "dataEventRecordsSecret",
                AutomaticAuthenticate = true,
                SupportedTokens = SupportedTokens.Both,
                // TokenRetriever = _tokenRetriever,
                // required if you want to return a 403 and not a 401 for forbidden responses
                AutomaticChallenge = true,
            };

            app.UseIdentityServerAuthentication(identityServerValidationOptions);

            app.UseMvcWithDefaultRoute();
        }
    }
}

The application can then be run and tested. To test, right click the project and debug.

Links

https://github.com/IdentityServer/IdentityServer4

http://docs.identityserver.io/en/dev/

https://github.com/IdentityServer/IdentityServer4.Samples

https://docs.asp.net/en/latest/security/authentication/identity.html

https://github.com/IdentityServer/IdentityServer4/issues/349

ASP.NET Core, Angular2 with Webpack and Visual Studio



Andrew Lock: Injecting services into ValidationAttributes in ASP.NET Core


I was battling the other day writing a custom DataAnnotations ValidationAttribute, where I needed access to a service class to perform the validation. The documentation on creating custom attributes is excellent, covering both server side and client side validation, but it doesn't mention this, presumably relatively common, requirement. This post describes how to use dependency injection with ValidationAttributes in ASP.NET Core, and the process I took in trying to figure out how!

Injecting services into attributes in general has always been somewhat problematic as you can't use constructor injection for anything that's not a constant. This often leads to implementations requiring some sort of service locator pattern when external services are required, or a factory pattern to create the attributes.

tl;dr; ValidationAttribute.IsValid() provides a ValidationContext parameter you can use to retrieve services from the DI container by calling GetService().

Injecting services into ActionFilters

In ASP.NET Core MVC, as well as having simple 'normal' IFilter attributes that can be used to decorate your actions, there are the ServiceFilter and TypeFilter attributes. These implement the IFilterFactory interface, which, as the name suggests, acts as a factory for IFilters!

These two filter types allow you to use classes with constructor dependencies as attributes. For example, we can create an IFilter implementation that has external dependencies:

public class FilterClass : ActionFilterAttribute  
{
  public FilterClass(IDependency1 dependency1, IDependency2 dependency2)
  {
    // ...use dependencies
  }
}

We can then decorate our controller actions to use FilterClass by using the ServiceFilter or TypeFilter:

public class HomeController: Controller  
{
    [TypeFilter(typeof(FilterClass))]
    [ServiceFilter(typeof(FilterClass))]
    public IActionResult Index()
    {
        return View();
    }
}

Both of these attributes will return an instance of the FilterClass to the MVC Pipeline when requested, as though the FilterClass was an attribute applied directly to the Action. The difference between them lies in how they create an instance of the FilterClass.

The ServiceFilter will attempt to resolve an instance of FilterClass directly from the IoC container, so the FilterClass and its dependencies must be registered with the IoC container.

The TypeFilter attribute also creates an instance of the FilterClass, but only its dependencies are resolved from the IoC container; the FilterClass itself does not need to be registered.
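In practical terms, this means the ServiceFilter approach needs a registration for the filter class itself, while the TypeFilter approach only needs the dependencies registered. A minimal sketch (Dependency1 and Dependency2 are placeholder implementations of the interfaces from the example above):

public void ConfigureServices(IServiceCollection services)
{
    // needed by both approaches - the dependencies are resolved from the container
    services.AddScoped<IDependency1, Dependency1>();
    services.AddScoped<IDependency2, Dependency2>();

    // only needed for [ServiceFilter(typeof(FilterClass))]
    services.AddScoped<FilterClass>();

    services.AddMvc();
}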

For more details on using TypeFilter and ServiceFilter see the documentation or this post.

How ValidationAttributes are resolved

For my CustomValidationAttribute I needed access to an external service to perform the validation:

public class CustomValidationAttribute: ValidationAttribute  
{
  protected override ValidationResult IsValid(object value, ValidationContext validationContext)
    {
        // ... need access to external service here
    }
}

In my first attempt to inject a service I thought I would have to take a similar approach to the ServiceFilter and TypeFilter attributes. Optimistically, I created a TypeFilter, passed in my CustomValidationAttribute, applied it to the model property and crossed my fingers.

It didn't work.

The mechanism by which DataAnnotation ValidationAttributes are applied to your model is completely different to the IFilter and IFilterFactory attributes used by the MVC infrastructure to build a pipeline.

The default implementation of IModelValidatorProvider used by the Microsoft.AspNetCore.Mvc.DataAnnotations library (cunningly called DataAnnotationsModelValidatorProvider) is responsible for creating the IModelValidator instances in the method CreateValidators. The IModelValidator is responsible for performing the actual validation of a decorated property.

I thought about creating a custom IModelValidatorProvider and creating the validators myself using an ObjectFactory, similar to the way the ServiceFilter and TypeFilter work.

Inside the DataAnnotationsModelValidatorProvider.CreateValidators method is this section of code, which creates a DataAnnotationsModelValidator object from a ValidationAttribute (see here for the full code):

var attribute = validatorItem.ValidatorMetadata as ValidationAttribute;  
if (attribute == null)  
{
    continue;
}

var validator = new DataAnnotationsModelValidator(  
    _validationAttributeAdapterProvider,
    attribute,
    stringLocalizer);

As you can see, the attributes are already created at this point, and exist as ValidatorMetadata on the ModelValidatorProviderContext passed to the function. In order to be able to use a TypeFilter-like approach, we would have to hook in much further up the stack.

At this point I decided that I must be missing something, as it couldn't possibly be this difficult…

The solution

Sure enough, the final answer was simple!

When creating a custom validation attribute you need to override the IsValid method:

public class CustomValidationAttribute : ValidationAttribute  
{
    protected override ValidationResult IsValid(object value, ValidationContext validationContext)
    {
        // ... validation logic
    }
}

As you can see, you are provided a ValidationContext as part of the method call. The context object contains a number of properties related to the object currently being validated, and also this handy number:

public object GetService(Type serviceType);  

This hooks into the IoC IServiceProvider to allow retrieving services in your ValidationAttributes:

protected override ValidationResult IsValid(object value, ValidationContext validationContext)  
{
    var service = (IExternalService)validationContext.GetService(typeof(IExternalService));
    // use the service to validate 'value', then return Success or an error result
    return ValidationResult.Success;
}
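For the GetService call to return anything useful, the service has to be registered with the DI container. A minimal sketch, with IExternalService and ExternalService as placeholder names:

public void ConfigureServices(IServiceCollection services)
{
    // placeholder service consumed by the CustomValidationAttribute above
    services.AddScoped<IExternalService, ExternalService>();

    services.AddMvc();
}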

So in the end, nice and easy, no need for the complex re-implementations route I was eyeing up.

Happy validating!


Dominick Baier: New in IdentityServer4: Resource Owner Password Validation

Not completely new, but re-designed.

In IdentityServer3, we used the user service for both interactive as well as non-interactive authentication. In IdentityServer4, the interactive authentication is done by the UI.

OAuth 2 resource owner password validation is disabled by default – but you can add support for it by implementing and registering the IResourceOwnerPasswordValidator interface.

This gives you more flexibility than in IdentityServer3, since you get access to the raw request and have more control over the token response via the new GrantValidationResult.
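A minimal implementation sketch (the hard-coded credential check is just a placeholder for your own user store; the important parts are the interface, access to the raw request via the context, and the GrantValidationResult):

public class MyResourceOwnerPasswordValidator : IResourceOwnerPasswordValidator
{
    public Task ValidateAsync(ResourceOwnerPasswordValidationContext context)
    {
        // placeholder check - replace with a lookup against your own user store
        if (context.UserName == "bob" && context.Password == "secret")
        {
            context.Result = new GrantValidationResult(
                subject: "88421113",
                authenticationMethod: "custom");
        }
        else
        {
            context.Result = new GrantValidationResult(
                TokenRequestErrors.InvalidGrant,
                "invalid credentials");
        }

        return Task.FromResult(0);
    }
}

The validator is then registered with the container, for example with services.AddTransient<IResourceOwnerPasswordValidator, MyResourceOwnerPasswordValidator>().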




Andrew Lock: Localising the DisplayAttribute and avoiding magic strings in ASP.NET Core


This post follows on from my previous post about localising an ASP.NET Core application. At the end of that article, we had localised our application so that the user could choose their culture, which would update the page title and the validation attributes with the appropriate translation, but not the form labels. In this post, we cover some of the problems you may run into when localising your application and approaches to deal with them.


Brief Recap

Just so we're all on the same page, I'll briefly recap how localisation works in ASP.NET Core. If you would like a more detailed description, check out my previous post or the documentation.

Localisation is handled in ASP.NET Core through two main abstractions, IStringLocalizer and IStringLocalizer<T>. These allow you to retrieve the localised version of a key by passing in a string; if the key does not exist for that resource, or you are using the default culture, the key itself is returned as the resource:

public class ExampleClass  
{
    private readonly IStringLocalizer<ExampleClass> _localizer;

    public ExampleClass(IStringLocalizer<ExampleClass> localizer)
    {
        _localizer = localizer;

        // If the resource exists, this returns the localised string
        var localisedString1 = _localizer["I exist"]; // "J'existe"

        // If the resource does not exist, the key itself is returned
        var localisedString2 = _localizer["I don't exist"]; // "I don't exist"
    }
}

Resources are stored in .resx files that are named according to the class they are localising. So for example, the IStringLocalizer<ExampleClass> localiser would look for a file named (something similar to) ExampleClass.fr-FR.resx. Microsoft recommends that the resource keys/names in the .resx files are the localised values in the default culture. That way you can write your application without having to create any resource files - the supplied string will be used as the resource.

As well as arbitrary strings like this, DataAnnotations which derive from ValidationAttribute also have their ErrorMessage property localised automatically. However the DisplayAttribute and other non-ValidationAttributes are not localised.

Finally, you can localise your Views, either providing whole replacements for your View by using filenames of the form Index.fr-FR.cshtml, or by localising specific strings in your view with another abstraction, the IViewLocalizer, which acts as a view-specific wrapper around IStringLocalizer.

Some of the pitfalls

There are two significant issues I personally find with the current state of localisation:

  1. Magic strings everywhere
  2. Can't localise the DisplayAttribute

The first of these is a design decision by Microsoft, to reduce the ceremony of localising an application. Instead of having to worry about extracting all your hard coded strings out of the code and into .resx files, you can just wrap it in a call to the IStringLocalizer and worry about localising other languages down the line.

While the attempt to improve productivity is a noble goal, it comes with a risk. The problem is that the string values embedded in your code ("I exist" and "I don't exist" in the code above) are serving a dual purpose, both as a string resource for the default culture, and as a key into a resource dictionary.

Inevitably, at some point you will introduce a typo into one of your string resources, it's just a matter of time. You better be sure whoever spots it understands the implications of changing it however, as fixing your typo will cause every other localised language to break. The default resource which is embedded in your code can only be changed if you ensure that every other resource file changes at the same time. That coupling is incredibly fragile, and it will not necessarily be obvious to the person correcting the typo that anything has broken. It is only obvious if they explicitly change culture and notice that the string is no longer localised.

The second issue, related to the DisplayAttribute, seems like a fairly obvious omission - by its nature it contains values which are normally highly visible (used as labels for a form) and will pretty much always need to be localised. As I'll show shortly there are workarounds for this, but currently they are rather clumsy.

It may be that these issues either don't bother you or are not a big deal, but I wanted to work out how to deal with them in a way that made me more comfortable. In the next sections I show how I did that.

Removing the magic strings

Removing the magic strings is something that I tend to do in any new project. MVC typically uses strings for any sort of dictionary storage, for example Session storage, ViewData, AuthorizationPolicy names, the list goes on. I've been bitten too many times by subtle typos causing unexpected behaviour that I like to pull these strings into utility classes with names like ViewDataKeys and PolicyNames:

public static class ViewDataKeys  
{
    public const string Title = "Title";
}

That way, I can use the strongly typed Title property whenever I'm accessing ViewData - I get IntelliSense, avoid typos and can rename safely. This is a pretty common approach, and it can be applied just as easily to our localisation problem.

public static class ResourceKeys  
{
    public const string HomePage = "HomePage";
    public const string Required = "Required";
    public const string NotAValidEmail = "NotAValidEmail";
    public const string YourEmail = "YourEmail";
}

Simply create a static class to hold your string key names, and instead of using the resource in the default culture as the key, use the appropriate strongly typed member:

public class HomeViewModel  
{
    [Required(ErrorMessage = ResourceKeys.Required)]
    [EmailAddress(ErrorMessage = ResourceKeys.NotAValidEmail)]
    [Display(Name = "Your Email")]
    public string Email { get; set; }
}

Here you can see the ErrorMessage properties of our ValidationAttributes reference the static properties instead of the resource in the default culture.

The final step is to add a .resx file for each localised class for the default language (without a culture suffix on the file name). This is the downside to this approach that Microsoft were trying to avoid with their design, and I admit, it is a bit of a drag. But at least you can fix typos in your strings without breaking all your other languages!

How to Localise DisplayAttribute

Now we have the magic strings fixed, we just need to try and localise the DisplayAttribute. As of right now, the only way I have found to localise the display attribute is to use the legacy localisation capabilities which still reside in the DataAnnotation attributes, namely the ResourceType property.

This property is a Type, and allows you to specify a class in your solution that contains a static property corresponding to the value provided in the Name of the DisplayAttribute. This allows us to use the Visual Studio resource file designer to auto-generate a backing class with the required properties to act as hooks for the localisation.


If you create a .resx file in Visual Studio without a culture suffix, it will automatically create a .designer.cs file for you. With the new localisation features of ASP.NET Core, this can typically be deleted, but in this case we need it. Generating the above resource file in Visual Studio will generate a backing class similar to the following:

public class ViewModels_HomeViewModel {

    private static global::System.Resources.ResourceManager resourceMan;
    private static global::System.Globalization.CultureInfo resourceCulture;

    // details hidden for brevity

    public static string NotAValidEmail {
        get {
            return ResourceManager.GetString("NotAValidEmail", resourceCulture);
        }
    }

    public static string Required {
        get {
            return ResourceManager.GetString("Required", resourceCulture);
        }
    }

    public static string YourEmail {
        get {
            return ResourceManager.GetString("YourEmail", resourceCulture);
        }
    }
}

We can now update our display attribute to use the generated resource, and everything will work as expected. We'll also remove the magic string from the Name attribute at this point and move the resource into our .resx file:

public class HomeViewModel  
{
    [Required(ErrorMessage = ResourceKeys.Required)]
    [EmailAddress(ErrorMessage = ResourceKeys.NotAValidEmail)]
    [Display(Name = ResourceKeys.YourEmail, ResourceType = typeof(Resources.ViewModels_HomeViewModel))]
    public string Email { get; set; }
}

If we run our application again, you can see that the display attribute is now localised to say 'Votre Email' - lovely!


How to localise DisplayAttribute in the future

If that seems like a lot of work to get a localised DisplayAttribute then you're not wrong. That's especially true if you're not using Visual Studio, and so don't have the resx-auto-generation process.

Unfortunately it's a tricky problem to work around currently, in that it's just fundamentally not supported in the current version of MVC. The localisation of the ValidationAttribute.ErrorMessage happens deep in the inner workings of the MVC pipeline (in the DataAnnotationsMetadataProvider) and this is ideally where the localisation of the DisplayAttribute should be happening.

Luckily, this has already been fixed and is currently on the development branch of the ASP.NET Core repo. Theoretically that means it should appear in the 1.1.0 release when that happens, but we are at very early days at the moment!

Still, I wanted to give the current implementation a test, and luckily this is pretty simple to setup, as all the ASP.NET Core packages produced as part of the normal development workflow are pushed to various public MyGet feeds. I decided to use the 'aspnetcore-dev' feed, and updated my application to pull NuGet packages from it.

Be aware that pulling packages from this feed should not be something you do in a production app. Things are likely to change and break, so stick to the release NuGet feed unless you are experimenting or you know what you're doing!

Adding a pre-release MVC package

First, add a nuget.config file to your project and configure it to point to the aspnetcore-dev feed:

<?xml version="1.0" encoding="utf-8"?>  
<configuration>  
  <packageSources>
    <add key="AspNetCore" value="https://dotnet.myget.org/F/aspnetcore-dev/api/v3/index.json" />
    <add key="NuGet" value="https://api.nuget.org/v3/index.json" />
  </packageSources>
</configuration>  

Next, update the MVC package in your project.json to pull down the latest package, as of writing this was version 1.1.0-alpha1-22152, and run a dotnet restore.

{
  "dependencies": {
    ...
    "Microsoft.AspNetCore.Mvc": "1.1.0-alpha1-22152",
    ...
  }
}

And that's it! We can remove the ugly ResourceType property from a DisplayAttribute, delete our resource .designer.cs file and everything just works as you would expect. If you are using the magic string approach, that just works, or you can use the approach I described above with ResourceKeys.

public class HomeViewModel  
{
    [Required(ErrorMessage = ResourceKeys.Required)]
    [EmailAddress(ErrorMessage = ResourceKeys.NotAValidEmail)]
    [Display(Name = ResourceKeys.YourEmail)]
    public string Email { get; set; }
}

As already mentioned, this is early pre-release days, so it will be a while until this capability is generally available, but it's heartening to see it ready and waiting!

Loading all resources from a single file

The final slight bugbear I have with the current localisation implementation is the resource file naming. As described in the previous post, each localised class or view gets its own embedded resource file that has to match the file name. I was toying with the idea of having a single .resx file for each culture which contains all the required strings instead, with the resource key prefixed by the type name, but I couldn't see any way of doing this out of the box.

You can get close to this out of the box, by using a 'Shared resource' as the type parameter in injected IStringLocalizer<T>, so that all the resources using it will, by default, be found in a single .resx file. Unfortunately that only goes part of the way, as you are still left with the DataAnnotations and IViewLocalizer which will use the default implementations, and expect different files per class.
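The shared resource approach looks something like this - an empty marker class, with every injected localiser pointing at it so that all strings come from one resource file per culture:

using Microsoft.Extensions.Localization;

// Empty marker class - all shared strings live in e.g. Resources/SharedResource.fr-FR.resx
public class SharedResource
{
}

public class ExampleService
{
    private readonly IStringLocalizer<SharedResource> _localizer;

    public ExampleService(IStringLocalizer<SharedResource> localizer)
    {
        _localizer = localizer;
    }

    public string GetGreeting() => _localizer["Hello"];
}

That covers the localisers you inject yourself, but not the DataAnnotations or view localisation mentioned above.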

As far as I can see, in order to achieve this, we need to replace the IStringLocalizer and IStringLocalizerFactory services with our own implementations that will load the strings from a single file. Given this small change, I looked at just overriding the default ResourceManagerStringLocalizerFactory implementation, however the methods that would need changing are not virtual, which leaves us re-implementing the whole class again.

The code is a little long and tortuous, and this post is already long enough, so I won't post it here, but you can find the approach I took on GitHub. It is in a somewhat incomplete but working state, so if anyone is interested in using it then it should provide a good starting point for a proper implementation.

For my part, and given the difficulty of working with .resx files outside of Visual Studio, I have started to look at alternative storage formats. Thanks to the use of abstractions like IStringLocalizerFactory in ASP.NET Core, it is perfectly possible to load resources from other sources.

In particular, Damien has a great post with source code on GitHub on loading resources from the database using Entity Framework Core. Alternatively, Ronald Wildenberg has built a JsonLocalizer which is available on GitHub.

Summary

In this post I described a couple of the pitfalls of the current localisation framework in ASP.NET Core. I showed how magic strings could be the source of bugs and how to replace them with a static helper class.

I also showed how to localise the DisplayAttribute using the ResourceType property as required in the current 1.0.0 release of ASP.NET Core, and showed how it will work in the (hopefully near) future.

Finally I linked to an example project that stores all resources in a single file per culture, instead of a file per resource type.


Damien Bowden: Setting the NLog database connection string in the ASP.NET Core appsettings.json

This article shows how the NLog connection string for the DatabaseTarget can be configured in the appsettings.json in an ASP.NET Core project and not the XML nlog.config file. All the NLog target properties can be configured in code if required and not just in the NLog XML configuration file.

Code: https://github.com/damienbod/AspNetCoreNlog

NLog posts in this series:

  1. ASP.NET Core logging with NLog and Microsoft SQL Server
  2. ASP.NET Core logging with NLog and Elasticsearch
  3. Setting the NLog database connection string in the ASP.NET Core appsettings.json

The XML nlog.config file is the same as in the previous post, with no database connection string configured.

<?xml version="1.0" encoding="utf-8" ?>
<nlog xmlns="http://www.nlog-project.org/schemas/NLog.xsd"
      xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
      autoReload="true"
      internalLogLevel="Warn"
      internalLogFile="C:\git\damienbod\AspNetCoreNlog\Logs\internal-nlog.txt">

           
  <targets>

    <target name="database" xsi:type="Database" >

<!-- THIS is not required, read from the appsettings.json

<connectionString>
        Data Source=N275\MSSQLSERVER2014;Initial Catalog=Nlogs;Integrated Security=True;
</connectionString>
-->

<!--
  Remarks:
    The appsetting layouts require the NLog.Extended assembly.
    The aspnet-* layouts require the NLog.Web assembly.
    The Application value is determined by an AppName appSetting in Web.config.
    The "NLogDb" connection string determines the database that NLog write to.
    The create dbo.Log script in the comment below must be manually executed.

  Script for creating the dbo.Log table.

  SET ANSI_NULLS ON
  SET QUOTED_IDENTIFIER ON
  CREATE TABLE [dbo].[Log] (
      [Id] [int] IDENTITY(1,1) NOT NULL,
      [Application] [nvarchar](50) NOT NULL,
      [Logged] [datetime] NOT NULL,
      [Level] [nvarchar](50) NOT NULL,
      [Message] [nvarchar](max) NOT NULL,
      [Logger] [nvarchar](250) NULL,
      [Callsite] [nvarchar](max) NULL,
      [Exception] [nvarchar](max) NULL,
    CONSTRAINT [PK_dbo.Log] PRIMARY KEY CLUSTERED ([Id] ASC)
      WITH (PAD_INDEX  = OFF, STATISTICS_NORECOMPUTE  = OFF, IGNORE_DUP_KEY = OFF, ALLOW_ROW_LOCKS  = ON, ALLOW_PAGE_LOCKS  = ON) ON [PRIMARY]
  ) ON [PRIMARY]
-->

          <commandText>
              insert into dbo.Log (
              Application, Logged, Level, Message,
              Logger, CallSite, Exception
              ) values (
              @Application, @Logged, @Level, @Message,
              @Logger, @Callsite, @Exception
              );
          </commandText>

          <parameter name="@application" layout="AspNetCoreNlog" />
          <parameter name="@logged" layout="${date}" />
          <parameter name="@level" layout="${level}" />
          <parameter name="@message" layout="${message}" />

          <parameter name="@logger" layout="${logger}" />
          <parameter name="@callSite" layout="${callsite:filename=true}" />
          <parameter name="@exception" layout="${exception:tostring}" />
      </target>
      
  </targets>

  <rules>
    <logger name="*" minlevel="Trace" writeTo="database" />
      
  </rules>
</nlog>

The NLog DatabaseTarget connection string is configured in the appsettings.json as described in the ASP.NET Core configuration docs.

{
    "Logging": {
        "IncludeScopes": false,
        "LogLevel": {
            "Default": "Debug",
            "System": "Information",
            "Microsoft": "Information"
        }
    },
    "ElasticsearchUrl": "http://localhost:9200",
    "ConnectionStrings": {
        "NLogDb": "Data Source=N275\\MSSQLSERVER2014;Initial Catalog=Nlogs;Integrated Security=True;"
    }
}

The configuration is then read in the Startup constructor.

public Startup(IHostingEnvironment env)
{
	var builder = new ConfigurationBuilder()
		.SetBasePath(env.ContentRootPath)
		.AddJsonFile("appsettings.json", optional: true, reloadOnChange: true)
		.AddJsonFile($"appsettings.{env.EnvironmentName}.json", optional: true)
		.AddEnvironmentVariables();
	Configuration = builder.Build();
}

The NLog DatabaseTarget is then configured in the Configure method to use the connection string from the app settings, and all DatabaseTarget instances in the NLog configuration are updated to use it. All target properties can be configured in this way if required.

public void Configure(IApplicationBuilder app, IHostingEnvironment env, ILoggerFactory loggerFactory)
{
	loggerFactory.AddNLog();

	foreach (DatabaseTarget target in LogManager.Configuration.AllTargets.Where(t => t is DatabaseTarget))
	{
		target.ConnectionString = Configuration.GetConnectionString("NLogDb");
	}
	
	LogManager.ReconfigExistingLoggers();
	
	app.UseMvc();
}
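Once the target is configured, logging goes through the standard Microsoft.Extensions.Logging abstractions and the entries end up in the database. A small usage sketch (the controller name is just an example):

using Microsoft.AspNetCore.Mvc;
using Microsoft.Extensions.Logging;

[Route("api/[controller]")]
public class ValuesController : Controller
{
    private readonly ILogger<ValuesController> _logger;

    public ValuesController(ILogger<ValuesController> logger)
    {
        _logger = logger;
    }

    [HttpGet]
    public IActionResult Get()
    {
        // written to the NLog database target configured above
        _logger.LogInformation("GET api/values called");
        return Ok(new[] { "value1", "value2" });
    }
}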

Links

https://github.com/NLog/NLog.Extensions.Logging

https://github.com/NLog

https://github.com/NLog/NLog/blob/38aef000f916bd5ffd8b80a5576afa2423192e84/examples/targets/Configuration%20API/Database/MSSQL/Example.cs

https://docs.asp.net/en/latest/fundamentals/logging.html

https://msdn.microsoft.com/en-us/magazine/mt694089.aspx

https://github.com/nlog/NLog/wiki/Database-target

https://docs.asp.net/en/latest/fundamentals/configuration.html



Andrew Lock: How to use machine-specific configuration with ASP.NET Core


In this quick post I'll show how to easily setup machine-specific configuration in your ASP.NET Core applications. This allows you to use different settings depending on the name of the machine you are using.

The tl;dr; version is to add a json file to your project containing your computer's name, e.g. appsettings.MACHINENAME.json, and update your ConfigurationBuilder in Startup with the following line:

.AddJsonFile($"appsettings.{Environment.MachineName}.json", optional: true)

Background

Why would you want to do this? Well, it depends.

When working on an application with multiple people, you will often run into a situation where you need different configuration settings for each developer's machine. Typically, we find that file paths and sometimes connection strings need to customised per developer.

In ASP.NET 4.x we found this somewhat of an ordeal to manage. Typically, we would create a connection string for each developer's machine, and create appsettings of the form MACHINENAME_APPSETTINGNAME. For example,

<configuration>  
  <connectionStrings>
    <add name="DAVES-MACBOOK" connectionString="Data Source=DAVES-MACBOOK;Initial Catalog=TestApp; Trusted_Connection=True;" />
    <add name="JON-PC" connectionString="Data Source=JON-PC;Initial Catalog=TestAppDb; Trusted_Connection=True;" />
  </connectionStrings>
  <appSettings>
    <add key="DAVES-MACBOOK_StoragePath" value="D:\" />
    <add key="JON-PC_StoragePath" value="C:\Dump" />
  </appSettings>
</configuration>  

So in this case, for the two developer machines named DAVES-MACBOOK and JON-PC, we have a different connection string for each machine, as well as different values for each of the StoragePath application settings.

This requires a bunch of wrapper classes around accessing appsettings which, while a good idea generally, is a bit of an annoyance and ends up polluting web.config.

The new way in ASP.NET Core

With ASP.NET Core, the updated configuration system allows for a much cleaner replacement of settings depending on the environment.

For example, in the default configuration for a web application, you can have environment specific appsettings files such as appsettings.Production.json which will override the default values in the appropriate environment:

public Startup(IHostingEnvironment env)  
{
    var builder = new ConfigurationBuilder()
        .SetBasePath(env.ContentRootPath)
        .AddJsonFile("appsettings.json", optional: true, reloadOnChange: true)
        .AddJsonFile($"appsettings.{env.EnvironmentName}.json", optional: true)
        .AddEnvironmentVariables();
    Configuration = builder.Build();
}

Similarly, environment variables and UserSecrets can be used to override the default values. It's likely that in the majority of cases, these are perfect for the situation described above - they apply only to the single machine, and can override the default values provided.

In larger teams and projects this approach will almost certainly be the correct one - each individual machine contains the specific settings for just that machine, and the repo isn't polluted with 101 different versions of the same setting.

However, it may be desirable in some cases, particularly in smaller teams, to actually store these values in the repo. Environment variables can be overwritten, UserSecrets can be deleted, and so on. With the .NET Core configuration system this alternative approach is simple to achieve with a single additional line:

.AddJsonFile($"appsettings.{Environment.MachineName}.json", optional: true)

This uses string interpolation to insert the current machine name in the file path. The Environment class contains a number of environment-specific static properties like ProcessorCount, NewLine and luckily for us, MachineName. Using this approach, we can add a configuration file for each user with their machine-specific values e.g.

appsettings.DAVES-MACBOOK.json:

{
  "ConnectionStrings": {
    "DefaultConnection": "Data Source=DAVES-MACBOOK;Initial Catalog=TestApp; Trusted_Connection=True;"
  },
  "StoragePath": "D:\"
}

appsettings.JON-PC.json:

{
  "ConnectionStrings": {
    "DefaultConnection": "Data Source=JON-PC;Initial Catalog=TestAppDb; Trusted_Connection=True;"
  },
  "StoragePath": "C:\Dump"
}
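The position of the machine-specific file in the ConfigurationBuilder matters, because later providers override earlier ones. Adding it after the environment-specific file means the per-machine values win over both of the other JSON files; a sketch of the combined Startup constructor:

public Startup(IHostingEnvironment env)
{
    var builder = new ConfigurationBuilder()
        .SetBasePath(env.ContentRootPath)
        .AddJsonFile("appsettings.json", optional: true, reloadOnChange: true)
        .AddJsonFile($"appsettings.{env.EnvironmentName}.json", optional: true)
        // machine-specific overrides - the last JSON file wins for duplicate keys
        .AddJsonFile($"appsettings.{Environment.MachineName}.json", optional: true)
        .AddEnvironmentVariables();
    Configuration = builder.Build();
}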

Finally, if you want to deploy your machine-specific json files (you quite feasibly may not want to), then be sure to update the publishOptions section of your project.json:

{
  "publishOptions": {
    "include": [
      "wwwroot",
      "Views",
      "appsettings.json",
      "appsettings.*.json",
      "web.config"
    ]
  }
}

Note that you can use a wildcard in the appsettings name to publish all of your machine-specific appsettings files, but be aware this will also publish all files of the format appsettings.Development.json etc. too.

So there you have it, no more wrappers around the built in app configuration, per-machine settings stored in the application repo, and a nice clean interface, all courtesy of the cleanly designed .NET Core configuration system!


Darrel Miller: RPC vs REST is not in the URL

 

In Phil Sturgeon’s article Understanding REST and RPC for HTTP APIs, he makes the assertion that the following URL is not technically RESTful.

POST /trips/123/start

I have made a habit of countering these assertions of “non-restfulness” with the following question: 

Can you point to the REST constraint that is violated and the negative system effects due to the violation of that constraint?

I don’t believe any REST constraint is being violated by that URL.

Machines don’t care what your URL says

As far as I understand, clients in RESTful systems treat URLs as opaque identifiers and therefore there are absolutely no negative system effects of a verb appearing in a URL. 

Obviously, having a URL that contains a verb that contradicts the method used would be confusing to a developer, but that is a documentation issue, e.g.
 
GET /deleteThing

that safely returns a thing could be RESTful but is very misleading. But,

GET /deleteThing

that deletes an object would violate the uniform interface constraint for HTTP requests and is therefore not RESTful.  The server’s behaviour is inconsistent with the GET HTTP method.  That’s bad.  The word delete in the URL is unfortunate.

RPC is a client thing

My understanding of RPC, based on James White’s original definition in RFC 707, is that it is a way of presenting a remote invocation as if it were a local invocation. The details of how it is called, and therefore the contents of the URL, are irrelevant. What is relevant is that a local call, in most languages, has the distinct characteristic of having exactly two outcomes: you either get back the type you were expecting, or you get some kind of exception defined by your programming language of choice. This is the limitation of RPC that makes it a poor choice for building reliable distributed systems. If only Steve Vinoski’s talk, RPC and its Offspring: Convenient, Yet Fundamentally Flawed, were still available - he does a much better job than I do of explaining the issues.

REST requires flexibility in response handling

The REST uniform interface guarantees that when you make a request to a resource, you get a self-descriptive representation that describes the outcome of the request. This could be one of many different outcomes but uses standardized metadata to convey the meaning of the response. It is the highly descriptive nature of the response that allows systems to be built that are resilient to failure and resilient to change. That’s what makes something RESTful.

The URL is a name, nothing more


There is no reason that,

POST /SendUserMessage


could not be a completely RESTful interaction.  POST is defined in RFC 7231 as being something that can send data to a processing resource.  From the opposite perspective, there is no reason why,

POST /users/501/messages


could not be wrapped in a client library that exposes the method:

Message CreateMessage(int userId)


which is an RPC method.
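To make that concrete, here is a rough sketch of such a wrapper (the type names and payload shape are illustrative only). The resource interaction stays the same; the RPC flavour is purely in how the client library presents it:

using System.Net.Http;
using System.Threading.Tasks;

public class Message
{
    public string Body { get; set; }
}

public class MessagesClient
{
    private readonly HttpClient _client = new HttpClient();

    // Presents the POST /users/{id}/messages resource as a local method call.
    public async Task<Message> CreateMessageAsync(int userId, string text)
    {
        var response = await _client.PostAsync(
            $"https://example.org/users/{userId}/messages",
            new StringContent(text));

        // Collapsing the many possible HTTP outcomes into "result or exception"
        // is exactly the RPC limitation described above.
        response.EnsureSuccessStatusCode();

        return new Message { Body = await response.Content.ReadAsStringAsync() };
    }
}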

Names are important to humans

We are in the unfortunate situation that the term REST has been brutally misused by our industry. It makes learning what REST is really difficult. However, to a large extent, Phil did a really good job of positioning the various distributed architectural approaches. But I don’t believe the distinction he is making between RPC and REST is correct. Hence, my post.


Andrew Lock: Adding Localisation to an ASP.NET Core application


In this post I'll walk through the process of adding localisation to an ASP.NET Core application using the recommended approach with resx resource files.

Introduction to Localisation

Localisation in ASP.NET Core is broadly similar to the way it works in ASP.NET 4.X. By default you would define a number of .resx resource files in your application, one for each culture you support. You then reference resources via a key, and depending on the current culture, the appropriate value is selected from the closest matching resource file.

While the concept of a .resx file per culture remains in ASP.NET Core, the way resources are used has changed quite significantly. In the previous version, when you added a .resx file to your solution, a designer file would be created, providing static strongly typed access to your resources through calls such as Resources.MyTitleString.

In ASP.NET Core, resources are accessed through two abstractions, IStringLocalizer and IStringLocalizer<T>, which are typically injected where needed via dependency injection. These interfaces have an indexer, that allows you to access resources by a string key. If no resource exists for the key (i.e. you haven't created an appropriate .resx file containing the key), then the key itself is used as the resource.

Consider the following example:

using Microsoft.AspNetCore.Mvc;  
using Microsoft.Extensions.Localization;

public class ExampleClass  
{
    private readonly IStringLocalizer<ExampleClass> _localizer;
    public ExampleClass(IStringLocalizer<ExampleClass> localizer)
    {
        _localizer = localizer;
    }

    public string GetLocalizedString()
    {
        return _localizer["My localized string"];
    }
}

In this example, calling GetLocalizedString() will cause the IStringLocalizer<T> to check the current culture, and see if we have an appropriate resource file for ExampleClass containing a resource with the name/key "My localized string". If it finds one, it returns the localised version; otherwise, it returns "My localized string".

The idea behind this approach is to allow you to design your app from the beginning to use localisation, without having to do up front work to support it by creating the default/fallback .resx file. Instead, you can just write the default values, then add the resources in later.

Personally, I'm not sold on this approach - it makes me slightly twitchy to see all those magic strings around which are essentially keys into a dictionary. Any changes to the keys may have unintended consequences, as I'll show later in the post.

Adding localisation to your application

For now, I'm going to ignore that concern, and dive in using Microsoft's recommended approach. I've started from the default ASP.NET Core Web application without authentication - you can find all the code on GitHub.

The first step is to add the localisation services in your application. As we are building an MVC application, we'll also configure View localisation and DataAnnotations localisation. The localisation packages are already referenced indirectly by the Microsoft.AspNetCore.MVC package, so you should be able to add the services and middleware directly in your Startup class:

public void ConfigureServices(IServiceCollection services)  
{
    services.AddLocalization(opts => { opts.ResourcesPath = "Resources"; });

    services.AddMvc()
        .AddViewLocalization(
            LanguageViewLocationExpanderFormat.Suffix,
            opts => { opts.ResourcesPath = "Resources"; })
        .AddDataAnnotationsLocalization();
}

These services allow you to inject the IStringLocalizer service into your classes. They also allow you to have localised View files (so you can have Views with names like MyView.fr.cshtml) and inject the IViewLocalizer, to allow you to use localisation in your view files. Calling AddDataAnnotationsLocalization configures the Validation attributes to retrieve resources via an IStringLocalizer.

The ResourcesPath property on the options object specifies the folder of our application in which resources can be found. So if the root of our application is found at ExampleProject, we have specified that our resources will be stored in the folder ExampleProject/Resources.
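
As a concrete (and purely illustrative) example, a project named ExampleProject using the conventions described in the rest of this post might end up with a structure along these lines:

ExampleProject/
    Controllers/
        HomeController.cs
    Views/
        Home/
            Index.cshtml
    Resources/
        Controllers.HomeController.fr.resx
        Views/
            Home/
                Index.fr.resx
        ViewModels.HomeViewModel.fr.resx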

Configuring these classes is all that is required to allow you to use the localisation services in your application. However you will typically also need some way to select what the current culture is for a given request.

To do this, we use the RequestLocalizationMiddleware. This middleware uses a number of different providers to try and determine the current culture. To configure it with the default providers, we need to decide which cultures we support, and which is the default culture.

Note that the configuration example in the documentation didn't work for me, though the Localization.StarterWeb project they reference did, and is reproduced below.

public void ConfigureServices(IServiceCollection services)  
{
    // ... previous configuration not shown

    services.Configure<RequestLocalizationOptions>(
        opts =>
        {
            var supportedCultures = new[]
            {
                new CultureInfo("en-GB"),
                new CultureInfo("en-US"),
                new CultureInfo("en"),
                new CultureInfo("fr-FR"),
                new CultureInfo("fr"),
            };

            opts.DefaultRequestCulture = new RequestCulture("en-GB");
            // Formatting numbers, dates, etc.
            opts.SupportedCultures = supportedCultures;
            // UI strings that we have localized.
            opts.SupportedUICultures = supportedCultures;
        });
}

public void Configure(IApplicationBuilder app)  
{
    app.UseStaticFiles();

    var options = app.ApplicationServices.GetService<IOptions<RequestLocalizationOptions>>();
    app.UseRequestLocalization(options.Value);

    app.UseMvc(routes =>
    {
        routes.MapRoute(
            name: "default",
            template: "{controller=Home}/{action=Index}/{id?}");
    });
}

Using localisation in your classes

We now have most of the pieces in place to start adding localisation to our application. We don't yet have a way for users to select which culture they want to use, but we'll come to that shortly. For now, let's look at how we go about retrieving a localised string.

Controllers and services

Whenever you want to access a localised string in your services or controllers, you can inject an IStringLocalizer<T> and use its indexer property. For example, imagine you want to localise a string in a controller:

public class HomeController: Controller  
{
    private readonly IStringLocalizer<HomeController> _localizer;

    public HomeController(IStringLocalizer<HomeController> localizer)
    {
        _localizer = localizer;
    }

    public IActionResult Index()
    {
        ViewData["MyTitle"] = _localizer["The localised title of my app!"];
        return View(new HomeViewModel());
    }
}

Calling _localizer[] will look up the provided string based on the current culture and the type HomeController. Assuming we have configured our application as discussed previously, that the HomeController resides in the ExampleProject.Controllers namespace, and that we are currently using the fr culture, the localizer will look for either of the following resource files:

  • Resources/Controllers.HomeController.fr.resx
  • Resources/Controllers/HomeController.fr.resx

If a resource exists in one of these files with the key "The localised title of my app!" then it will be used, otherwise the key itself will be used as the resource. This means you don't need to add any resource files to get started with localisation - you can just use the default language string as your key and come back to add .resx files later.

Views

There are two kinds of localisation of views. As described previously, you can localise the whole view, duplicating it and editing as appropriate, and providing a culture suffix. This is useful if the views need to differ significantly between different cultures.

You can also localise strings in a similar way to that shown for the HomeController. Instead of an IStringLocalizer<T>, you inject an IViewLocalizer into the view. This handles HTML encoding a little differently, in that it allows you to store HTML in the resource and it won't be encoded before being output. Generally you'll want to avoid that however, and only localise strings, not HTML.

The IViewLocalizer uses the name of the View file to find the associated resources, so for the HomeController's Index.cshtml view, with the fr culture, the localiser will look for:

  • Resources/Views.Home.Index.fr.resx
  • Resources/Views/Home/Index.fr.resx

The IViewLocalizer is used in a similar way to IStringLocalizer<T> - pass in the string in the default language as the key for the resource:

@using Microsoft.AspNetCore.Mvc.Localization
@model AddingLocalization.ViewModels.HomeViewModel
@inject IViewLocalizer Localizer
@{
    ViewData["Title"] = Localizer["Home Page"];
}
<h2>@ViewData["MyTitle"]</h2>  

DataAnnotations

One final common area that needs localisation is DataAnnotations. These attributes can be used to provide validation, naming and UI hints of your models to the MVC infrastructure. When used, they provide a lot of additional declarative metadata to the MVC pipeline, allowing selection of appropriate controls for editing the property etc.

Error messages for DataAnnotation validation attributes all pass through an IStringLocalizer<T> if you configure your MVC services using AddDataAnnotationsLocalization(). As before, this allows you to specify the error message for an attribute in your default language in code, and use that as the key to other resources later.

public class HomeViewModel  
{
    [Required(ErrorMessage = "Required")]
    [EmailAddress(ErrorMessage = "The Email field is not a valid e-mail address")]
    [Display(Name = "Your Email")]
    public string Email { get; set; }
}

Here you can see we have three DataAnnotation attributes, two of which are ValidationAttributes, and the DisplayAttribute, which is not. The ErrorMessage specified for each ValidationAttribute is used as a key to lookup the appropriate resource using an IStringLocalizer<HomeViewModel>. Again, the files searched for will be something like:

  • Resources/ViewModels.HomeViewModel.fr.resx
  • Resources/ViewModels/HomeViewModel.fr.resx

A key thing to be aware of is that the DisplayAttribute is not localised using the IStringLocalizer<T>. This is far from ideal, but I'll address it in my next post on localisation.

Allowing users to select a culture

With all this localisation in place, the final piece of the puzzle is to actually allow users to select their culture. The RequestLocalizationMiddleware uses an extensible provider mechanism for choosing the current culture of a request, and it comes with three providers built in:

  • QueryStringRequestCultureProvider
  • AcceptLanguageHeaderRequestCultureProvider
  • CookieRequestCultureProvider

These allow you to specify a culture in the querystring (e.g. ?culture=fr-FR), via the Accept-Language header in a request, or via a cookie. Of the three approaches, using a cookie is the least intrusive: it is sent automatically with every request, does not require the user to change the Accept-Language header in their browser, and does not require a value to be added to the querystring on every request.
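
For example, with the default configuration you can try out a culture without touching cookies or headers by appending it to the URL; the parameter names below are the provider defaults:

/Home/Index?culture=fr-FR&ui-culture=fr-FR

The culture value controls formatting of numbers and dates, while ui-culture controls which resource strings are used.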

Again, the Localization.StarterWeb sample project provides a handy implementation that shows how you can add a select box to the footer of your project to allow the user to set the language. Their choice is stored in a cookie, which is handled by the CookieRequestCultureProvider for each request. The provider then sets the CurrentCulture and CurrentUICulture of the thread for the request to the user's selection.

To add the selector to your application, create a partial view _SelectLanguagePartial.cshtml in the Shared folder of your Views:

@using System.Threading.Tasks
@using Microsoft.AspNetCore.Builder
@using Microsoft.AspNetCore.Localization
@using Microsoft.AspNetCore.Mvc.Localization
@using Microsoft.Extensions.Options

@inject IViewLocalizer Localizer
@inject IOptions<RequestLocalizationOptions> LocOptions

@{
    var requestCulture = Context.Features.Get<IRequestCultureFeature>();
    var cultureItems = LocOptions.Value.SupportedUICultures
        .Select(c => new SelectListItem { Value = c.Name, Text = c.DisplayName })
        .ToList();
}

<div title="@Localizer["Request culture provider:"] @requestCulture?.Provider?.GetType().Name">  
    <form id="selectLanguage" asp-controller="Home"
          asp-action="SetLanguage" asp-route-returnUrl="@Context.Request.Path"
          method="post" class="form-horizontal" role="form">
        @Localizer["Language:"] <select name="culture"
                                        asp-for="@requestCulture.RequestCulture.UICulture.Name" asp-items="cultureItems"></select>
        <button type="submit" class="btn btn-default btn-xs">Save</button>

    </form>
</div>  

We want to display this partial on every page, so update the footer of your _Layout.cshtml to reference it:

<footer>  
    <div class="row">
        <div class="col-sm-6">
            <p>&copy; 2016 - Adding Localization</p>
        </div>
        <div class="col-sm-6 text-right">
            @await Html.PartialAsync("_SelectLanguagePartial")
        </div>
    </div>
</footer>  

Finally, we need to add the controller code to handle the user's selection. This currently maps to the SetLanguage action in the HomeController:

[HttpPost]
public IActionResult SetLanguage(string culture, string returnUrl)  
{
    Response.Cookies.Append(
        CookieRequestCultureProvider.DefaultCookieName,
        CookieRequestCultureProvider.MakeCookieValue(new RequestCulture(culture)),
        new CookieOptions { Expires = DateTimeOffset.UtcNow.AddYears(1) }
    );

    return LocalRedirect(returnUrl);
}

And that's it! If we fire up the home page of our application, you can see the culture selector in the bottom right corner. At this stage, I have not added any resource files, but if I trigger a validation error, you can see that the resource key is used for the resource itself:

Adding Localisation to an ASP.NET Core application

My development flow is not interrupted by having to go and mess with resource files; I can just develop the application using the default language and add .resx files later in development. If I later add appropriate resource files for the fr culture and a user changes their culture via the selector, I can see the effect of localisation in the validation attributes and other localised strings:

Adding Localisation to an ASP.NET Core application

As you can see, the validation attributes and page title are localised, but the 'Your Email' label has not been, as that is set in the DisplayAttribute. (Apologies to any French speakers - it's totally Google Translate's fault if it's gibberish!)

Summary

In this post I showed how to add localisation to your ASP.NET Core application using the recommended approach of providing resources for the default language as keys, and only adding additional resources as required later.

In summary, the steps to localise your application are roughly as follows:

  1. Add the required localisation services
  2. Configure the localisation middleware and if necessary a culture provider
  3. Inject IStringLocalizer<T> into your controllers and services to localise strings
  4. Inject IViewLocalizer into your views to localise strings in views
  5. Add resource files for non-default cultures
  6. Add a mechanism for users to choose their culture

In the next post, I'll address some of the problems I've run into adding localisation to an application, namely the vulnerability of 'magic strings' to typos, and localising the DisplayAttribute.


Dominick Baier: New in IdentityServer4: Support for Extension Grants

Well – this is not completely new, but we redesigned it a bit.

Extension grants are used to add support for non-standard token issuance scenarios to the token endpoint, e.g. translating between token types, delegation, federation, custom input or output parameters.

One of the common questions we got was how to implement identity delegation – instead of repeating myself here – I wrote proper documentation on the topic, and how to use IdentityServer4 to implement it.

Get the details here.


Filed under: ASP.NET, IdentityServer, OAuth, WebAPI


Damien Bowden: Full Server logout with IdentityServer4 and OpenID Connect Implicit Flow

This article shows how to fully log out from IdentityServer4 when using the OpenID Connect Implicit Flow. By design, when an access token is used to access protected data on a resource server, the token can still be used even after the client has logged out of the server, as long as it remains valid (AccessTokenLifetime). This is the normal use case.

Sometimes, it is required that once a user logs out from IdentityServer4, no client with the same user can continue to use the protected data without logging in again. Reference tokens can be used to implement this. With reference tokens, you have full control over the lifecycle.

Code: https://github.com/damienbod/AspNet5IdentityServerAngularImplicitFlow

Other posts in this series:

To use reference tokens in IdentityServer4, the client can be defined with the AccessTokenType property set to AccessTokenType.Reference. When the user and the client successfully log in, a reference token and an id_token are returned to the client instead of an access token and an id_token (response_type: id_token token).

public static IEnumerable<Client> GetClients()
{
	// client credentials client
	return new List<Client>
	{
		new Client
		{
			ClientName = "angular2client",
			ClientId = "angular2client",
			AccessTokenType = AccessTokenType.Reference,
			AllowedGrantTypes = GrantTypes.Implicit,
			AllowAccessTokensViaBrowser = true,
			RedirectUris = new List<string>
			{
				"https://localhost:44311"

			},
			PostLogoutRedirectUris = new List<string>
			{
				"https://localhost:44311/Unauthorized"
			},
			AllowedCorsOrigins = new List<string>
			{
				"https://localhost:44311",
				"http://localhost:44311"
			},
			AllowedScopes = new List<string>
			{
				"openid",
				"dataEventRecords",
				"securedFiles"
			}
		}
	};
}

In IdentityServer4, when a user decides to log out, the IPersistedGrantService can be used to remove the reference tokens for this user and client. The RemoveAllGrantsAsync method of the IPersistedGrantService uses the identity subject and the client id to delete all of the corresponding grants. The GetSubjectId method is an IdentityServer4 extension method on the identity, which can be accessed via HttpContext.User. The client id must match the client from the configuration.

using System;
using System.Collections.Generic;
using System.Linq;
using System.Security.Claims;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Authorization;
using Microsoft.AspNetCore.Identity;
using Microsoft.AspNetCore.Mvc;
using Microsoft.AspNetCore.Mvc.Rendering;
using Microsoft.Extensions.Logging;
using IdentityServerWithAspNetIdentity.Models;
using IdentityServerWithAspNetIdentity.Models.AccountViewModels;
using IdentityServerWithAspNetIdentity.Services;
using IdentityServer4.Services;
using IdentityServer4.Quickstart.UI.Models;
using Microsoft.AspNetCore.Http.Authentication;
using IdentityServer4.Extensions;

namespace IdentityServerWithAspNetIdentity.Controllers
{
    [Authorize]
    public class AccountController : Controller
    {
        private readonly UserManager<ApplicationUser> _userManager;
        private readonly SignInManager<ApplicationUser> _signInManager;
        private readonly IEmailSender _emailSender;
        private readonly ISmsSender _smsSender;
        private readonly ILogger _logger;
        private readonly IIdentityServerInteractionService _interaction;
        private readonly IPersistedGrantService _persistedGrantService;

        public AccountController(
            IIdentityServerInteractionService interaction,
            IPersistedGrantService persistedGrantService,
            UserManager<ApplicationUser> userManager,
            SignInManager<ApplicationUser> signInManager,
            IEmailSender emailSender,
            ISmsSender smsSender,
            ILoggerFactory loggerFactory)
        {
            _interaction = interaction;
            _persistedGrantService = persistedGrantService;
            _userManager = userManager;
            _signInManager = signInManager;
            _emailSender = emailSender;
            _smsSender = smsSender;
            _logger = loggerFactory.CreateLogger<AccountController>();
        }
		
        /// <summary>
        /// Handle logout page postback
        /// </summary>
        [HttpPost]
        [ValidateAntiForgeryToken]
        public async Task<IActionResult> Logout(LogoutViewModel model)
        {
            var subjectId = HttpContext.User.Identity.GetSubjectId();

            // delete authentication cookie
            await HttpContext.Authentication.SignOutAsync();

          
            // set this so UI rendering sees an anonymous user
            HttpContext.User = new ClaimsPrincipal(new ClaimsIdentity());
            
            // get context information (client name, post logout redirect URI and iframe for federated signout)
            var logout = await _interaction.GetLogoutContextAsync(model.LogoutId);
            
            var vm = new LoggedOutViewModel
            {
                PostLogoutRedirectUri = logout?.PostLogoutRedirectUri,
                ClientName = logout?.ClientId,
                SignOutIframeUrl = logout?.SignOutIFrameUrl
            };


            await _persistedGrantService.RemoveAllGrantsAsync(subjectId, "angular2client");

            return View("LoggedOut", vm);
        }

The IdentityServer4.AccessTokenValidation NuGet package is used on the resource server to validate the reference token sent from the client. The IdentityServerAuthenticationOptions options are configured as required.

"IdentityServer4.AccessTokenValidation": "1.0.1-rc1"

This package is configured in the Startup class in the Configure method.

public void Configure(IApplicationBuilder app, IHostingEnvironment env, ILoggerFactory loggerFactory)
{
	loggerFactory.AddConsole();
	loggerFactory.AddDebug();

	app.UseExceptionHandler("/Home/Error");
	app.UseCors("corsGlobalPolicy");
	app.UseStaticFiles();

	JwtSecurityTokenHandler.DefaultInboundClaimTypeMap.Clear();

	IdentityServerAuthenticationOptions identityServerValidationOptions = new IdentityServerAuthenticationOptions
	{
		Authority = "https://localhost:44318/",
		ScopeName = "dataEventRecords",
		ScopeSecret = "dataEventRecordsSecret",
		AutomaticAuthenticate = true,
		SupportedTokens = SupportedTokens.Both,
		// required if you want to return a 403 and not a 401 for forbidden responses

		AutomaticChallenge = true,
	};

	app.UseIdentityServerAuthentication(identityServerValidationOptions);

	app.UseMvc(routes =>
	{
		routes.MapRoute(
			name: "default",
			template: "{controller=Home}/{action=Index}/{id?}");
	});
}

The SPA client can then be used to log in to and log out from the server. If two or more clients are logged in with the same user, then once the user logs out from the server, none of them will have access to the protected data: all existing reference tokens for this user and client can no longer be used to access it.

By using reference tokens, you have full control over the lifecycle of access to the protected data. Caution should be taken when using long-lived access tokens.

Another strategy would be to use short-lived access tokens and make the client refresh them regularly. This reduces the time for which an access token remains usable after a logout, but the token can still be used to access the protected data until it expires.
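
As a rough sketch, the lifetime can be set on the client definition shown earlier; the value below is only an example, and the rest of the configuration is unchanged:

new Client
{
	ClientName = "angular2client",
	ClientId = "angular2client",
	AccessTokenType = AccessTokenType.Reference,
	// Access tokens issued to this client expire after 5 minutes (value in seconds).
	AccessTokenLifetime = 300,
	AllowedGrantTypes = GrantTypes.Implicit,
	AllowAccessTokensViaBrowser = true
	// RedirectUris, PostLogoutRedirectUris, AllowedCorsOrigins and AllowedScopes as above
};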

Links

http://openid.net/specs/openid-connect-core-1_0.html

http://openid.net/specs/openid-connect-implicit-1_0.html

https://github.com/IdentityServer/IdentityServer4/issues/313#issuecomment-247589782

https://github.com/IdentityServer/IdentityServer4

https://leastprivilege.com

https://github.com/IdentityServer/IdentityServer4/issues/313

https://github.com/IdentityServer/IdentityServer4/issues/310



Andrew Lock: Viewing what's changed in ASP.NET Core 1.0.1

Viewing what's changed in ASP.NET Core 1.0.1

On 13th September, Microsoft announced an update to .NET Core, which they are calling .NET Core 1.0.1. Along with the framework update, they are also releasing a 1.0.1 version of ASP.NET Core and Entity Framework Core. Details about the update can be found in a blog post by Microsoft.

The post does a good job of laying out the process you should take to update both your machine and applications. It also outlines the changes that have occurred, as well as the corresponding security advisory.

I was interested to know exactly what had changed in the source code between the different releases, in ASP.NET in particular. Luckily, as all ASP.NET Core development is open source on GitHub, that's pretty easy to do! :)

Comparing between tags and branches in GitHub

A feature that is not necessarily that well known is the ability to compare between two tags or branches using a URL of the form:

https://github.com/{username}/{repo}/compare/{older-tag}...{newer-tag}  

This presents you with a view of all the changes between the two provided tags. Alternatively, navigate to a repository, select branches, click the compare button and select the branches or tags to compare manually.
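
For example, assuming the repository tags its releases as 1.0.0 and 1.0.1, the changes to the MVC repository could be viewed at:

https://github.com/aspnet/Mvc/compare/1.0.0...1.0.1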

Viewing what's changed in ASP.NET Core 1.0.1

In the rest of the post, I'll give a rundown of the significant changes in the ASP.NET Core and EF Core libraries.

Changes in ASP.NET MVC

Most of the changes in the ASP.NET MVC repository are version changes of dependencies, incrementing from 1.0.0 to 1.0.1. Aside from these, there were a number of minor whitespace changes, comment changes, minor refactorings and additional unit tests. For the purpose of this post, I'm only going to focus on changes with a tangible difference to users. You can see the full diff here.

The first notable change is the handling of FIPS mode in the SHA256 provider. A static helper class, CryptographyAlgorithms, shown below, has been added to handle the case where you are running on a Windows machine with FIPS mode enabled. FIPS stands for 'Federal Information Processing Standard', and FIPS mode restricts Windows to a set of approved cryptographic algorithms required on US government computers - see here for a more detailed description. If you're not running applications on US federal computers, this change probably won't affect you.

using System.Security.Cryptography;

namespace Microsoft.AspNetCore.Mvc.TagHelpers.Internal  
{
    public static class CryptographyAlgorithms
    {
#if NETSTANDARD1_6
        public static SHA256 CreateSHA256()
        {
            var sha256 = SHA256.Create();

            return sha256;
        }
#else
        public static SHA256 CreateSHA256()
        {
            SHA256 sha256;

            try
            {
                sha256 = SHA256.Create();
            }
            // SHA256.Create is documented to throw this exception on FIPS compliant machines.
            // See: https://msdn.microsoft.com/en-us/library/z08hz7ad%28v=vs.110%29.aspx?f=255&MSPPError=-2147217396
            catch (System.Reflection.TargetInvocationException)
            {
                // Fallback to a FIPS compliant SHA256 algorithm.
                sha256 = new SHA256CryptoServiceProvider();
            }

            return sha256;
        }
#endif
    }
}

The second significant change is in MvcViewFeaturesMvcCoreBuilderExtensions. This class is called as part of the standard MVC service configuration for registering with the dependency injection container. The file has a single changed line, where the registration of ViewComponentResultExecutor is changed from singleton to transient.

Viewing what's changed in ASP.NET Core 1.0.1

I haven't dug into it further, but I suspect this is where the privilege elevation security bug mentioned in the announcement arose. This really shows how important it is to configure your service lifetimes correctly. It is especially important when adding an additional dependency to an already existing and configured class, to ensure you don't end up with captured transient dependencies.
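
To illustrate the 'captured dependency' problem with a deliberately simplified sketch (the types below are invented for illustration): if a singleton takes a constructor dependency that was registered as scoped or transient, that dependency is resolved once and held for the lifetime of the application, effectively becoming a singleton itself.

public interface ICurrentUserContext
{
    string UserName { get; set; }
}

public class CurrentUserContext : ICurrentUserContext
{
    public string UserName { get; set; }
}

public class AuditWriter
{
    private readonly ICurrentUserContext _user;

    public AuditWriter(ICurrentUserContext user)
    {
        // If AuditWriter is registered as a singleton, this per-request
        // context is captured once and reused for every subsequent request.
        _user = user;
    }

    public string Describe(string action)
    {
        return $"{_user.UserName}: {action}";
    }
}

// Registration that introduces the bug:
// services.AddScoped<ICurrentUserContext, CurrentUserContext>();
// services.AddSingleton<AuditWriter>();
//
// Every request now sees the ICurrentUserContext instance that happened
// to be resolved when the first AuditWriter was created.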

The final main change in the MVC repository fixes this issue whereby a DELETE route is incorrectly matched against a GET method. The commit message for the commit fixing the issue gives a great explanation of the problem, which boiled down to an overloaded == operator giving incorrect behaviour in this method. The fix was to replace the implicit Equals calls with ReferenceEquals.
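
As a simplified illustration of that class of bug (this is not the actual MVC code): once a type overloads ==, a comparison that was meant to ask "is this the same instance?" silently becomes a value comparison, and ReferenceEquals is needed to get the original intent back.

using System;

public class HttpMethodValue
{
    public string Method { get; }

    public HttpMethodValue(string method)
    {
        Method = method;
    }

    // The overloaded operator compares values, not references.
    public static bool operator ==(HttpMethodValue a, HttpMethodValue b)
    {
        if (ReferenceEquals(a, b)) return true;
        if (ReferenceEquals(a, null) || ReferenceEquals(b, null)) return false;
        return string.Equals(a.Method, b.Method, StringComparison.OrdinalIgnoreCase);
    }

    public static bool operator !=(HttpMethodValue a, HttpMethodValue b)
    {
        return !(a == b);
    }

    public override bool Equals(object obj)
    {
        return this == (obj as HttpMethodValue);
    }

    public override int GetHashCode()
    {
        return Method == null ? 0 : StringComparer.OrdinalIgnoreCase.GetHashCode(Method);
    }
}

// var first = new HttpMethodValue("GET");
// var second = new HttpMethodValue("GET");        // a different instance with the same value
//
// bool sameValue = first == second;                    // true  - value comparison
// bool sameInstance = ReferenceEquals(first, second);  // false - what the code meant to check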

Changes in AntiForgery

Much like the MVC repository, the AntiForgery repository uses a SHA256 algorithm. In order to not throw a TargetInvocationException when calling SHA256.Create() on FIPS enabled hardware, it falls back to the SHA256CryptoServiceProvider. Again, if you are not running on US Federal government computers then this probably won't affect you. You can view the diff here.

Changes in KestrelHttpServer

There is a single change in the Kestrel web server that fixes this bug, whereby replacing the Request.Body or Response.Body stream of the HttpContext causes it to be replaced for all subsequent requests too. This simple fix solves the problem by ensuring the streams are reset correctly on each request:

Viewing what's changed in ASP.NET Core 1.0.1

Other changes

I've highlighted the changes in ASP.NET Core 1.0.1, but there are also a tonne of minor changes in the Entity Framework Core library, too many to list here. You can view the full change list here. Finally, the CoreCLR runtime has fixed these three bugs (here, here and here), and the templates in the dotnet CLI have been updated to use version 1.0.1 of various packages.

Summary

This was just a quick update highlighting the changes in ASP.NET Core. As you can see, the changes are pretty minimal, like you'd expect for a patch release. However, the changes highlight a number of bugs to keep an eye out for when writing your own applications - namely incorrect service lifetimes and potential bugs when overloading the == operator.


Andrew Lock: HTML minification using WebMarkupMin in ASP.NET Core

HTML minification using WebMarkupMin in ASP.NET Core

It is common practice in web development to minify your static assets (CSS, JavaScript, images) as part of your deployment process. This reduces the amount of data being sent over the network, without changing the function of the code within.

In contrast, it is not very common to minify the HTML returned as part of a standard web request. In this post I show an easy way to add HTML minification to your ASP.NET Core application at runtime.

Why minify?

First of all, let's consider why we would want to minify HTML at all. It has been shown time and again that a slower page response leads to higher bounce rates, and that a more performant site has a direct benefit in terms of greater sales, so the speed with which we can render something in the user's browser is critical.

Minification is a performance optimisation, designed to reduce the amount of data being transferred over the network. At the simplest level it involves removing white-space, while more complex minifiers can perform operations such as variable renaming to reduce name lengths, and rewriting if-else constructs to use ternary expressions for example.

Javascript libraries are often available on public CDNs, so you can gain an additional performance boost there, avoiding having to serve files from your own servers at all.

HTML on the other hand will likely always need to come directly from your server. In addition, it will normally be the very first request sent as part of a page load, so getting the data back to the browser as fast as possible is critical.

Why isn't HTML minified by default?

Given that the benefit of reducing the size of data sent across the network seems clear, you may wonder why HTML isn't minified by default.

The CSS and Javascript for a web application are typically fixed at build time, which gives the perfect opportunity to optimise the files prior to deployment. You can minify (and bundle) them once when you publish your application and know you won't have to update them again.

In contrast, the HTML returned by an application is often highly dynamic. Consider a simple ecommerce site - different HTML needs to be returned for the same url depending if the user is logged in or not, whether the product is on sale, whether the related products have changed etc etc.

Given the HTML is not static, we either have to minify the HTML in realtime as it is generated and sent across the network, or, if possible, minify the HTML portion of the templates from which we are generating the final markup.

In addition, it is sometimes pointed out that using compression on your server (e.g. GZIP) will already significantly reduce the data sent across the network, and that minifying your HTML is not worth the effort. It's true that GZIP is very effective, especially for a markup language like HTML; however, there are still gains to be made, as I'll show below.

How much could we save?

About two years ago, Mads Kristensen wrote a series of posts about HTML minification, in one of which he demonstrated the savings that could be made by minifying as well as using GZIP on some HTML pages. I decided to recreate his experiment using the web pages as they are today, and got the following results:

Page                             File Size (KB)   Minified   GZIP   Minified & GZIP   Saving
amazon.com                       355              332        77.1   72.9              5.4%
xbox.com                         161              106        23.3   19.9              14.5%
twitter.com                      724              675        77.1   65.0              15.6%
Default MVC Template Home Page   7.3              5.3        1.9    1.8               5.3%

HTML minification using WebMarkupMin in ASP.NET Core

The results are broadly in line with those found by Mads. GZIP compression does perform the bulk of the work in reducing file size, but minifying the HTML prior to GZIP compression can reduce the file size by an additional 5-15%, which is not to be sniffed at!

Potential solutions

As mentioned before, in order to add HTML minification to your application you either need to minify the HTML at runtime as part of the pipeline, or you can minify the razor templates that are used to generate the final HTML.

Minifying razor templates

As with most software architecture choices, there are tradeoffs for each approach. Minifying the razor templates before publishing them seems like the most attractive option, as it is a one-off compile-time cost and is in keeping with the CSS and JavaScript best practices used currently. Unfortunately, doing so requires properly parsing the razor syntax which, due to a few quirks, is not as trivial as it might seem.

One such attempt is the ASP.NET Html Minifier by Dean Hume and is described in his blog post. It uses a standalone application to parse and minify your .cshtml files as part of the publish process. Under the hood, the majority of the processing is performed using regular expressions.

Another approach by Muhammed Rehan Saeed uses a gulp task to minify the .cshtml razor files on a publish. This also uses a regex approach to isolating the razor portions. Rehan has a blog post discussing the motivation for HTML minification in ASP.NET which is well worth reading. He also raised the issue with the MVC team regarding adding razor minification as part of the standard build process.

Minifying HTML at runtime

Minifying the razor templates seems like the most attractive solution, as it has zero overhead at runtime - the razor templates just happen to contain (mostly) already minified HTML before they are parsed and executed as part of a ViewResult. However, in some cases the razor syntax may cause the Regex minifiers to work incorrectly. As minification only occurs on publish, this could have the potential to cause bugs only in production, as development requires working with the unminified razor files.

Minifying the HTML just before it is served to the client (and before compression) is an easier process conceptually, as you are working directly with the raw HTML. You don't need to account for dynamic portions of the markup and you can be relatively sure about the results. Also, as the HTML is minified as part of the pipeline, the razor templates can stay pretty and unminified while you work with them, even though the resulting HTML is minified.

The downside to the runtime approach is the extra processing required for every request. No minification is done up front, and as the HTML may be different every time, the results of minification cannot be easily cached. This additional processing will inevitably add a small degree of latency. The tradeoff between this additional latency and the reduced download time due to the smaller payload is something you will need to weigh up for your own application.

In previous versions of ASP.NET there were a couple of options to choose from for doing runtime minification of HTML, e.g. Meleze.Web or Mads' WhitespaceModule, but the only project I have found for ASP.NET Core is WebMarkupMin.

Adding Web Markup Min to your ASP.NET Core app

WebMarkupMin is a very mature minifier, not just for HTML but also XML and XHTML, as well as script and style tags embedded in your HTML. They provide multiple NuGet packages for hooking up your ASP.NET applications, both for ASP.NET 4.x using MVC, HttpModules, WebForms(!) and luckily for us, ASP.NET Core.

The first step in getting started with WebMarkupMin is to install the package in your project.json. Be sure to install the WebMarkupMin.AspNetCore1 package for ASP.NET Core (not the WebMarkupMin.Core package):

{
  "dependencies": {
    "WebMarkupMin.AspNetCore1": "2.1.0"
  }
}

The HTML minifier is implemented as standard ASP.NET Core middleware, so you register it in your application's Startup.Configure method as usual:

public void Configure(IApplicationBuilder app)  
{
    app.UseStaticFiles();

    app.UseWebMarkupMin();

    app.UseMvc(routes =>
    {
        routes.MapRoute(
            name: "default",
            template: "{controller=Home}/{action=Index}/{id?}");
    });
}

As always with ASP.NET Core middleware, order is important here. I chose to register the WebMarkupMin middleware after the static file handler. That means the minifier will not run if the static file handler serves a file. If you are serving html files (or angular templates etc) using the static file handler then you may want to move the minifier earlier in the pipeline.

The final piece of configuration for the middleware is to add the required services to the IoC container. The minification and compression services are opt-in, so you only add the minifiers you actually need. In this case I am going to add an HTML minifier and HTTP compression using GZIP, but will not bother with XML or XHTML minifiers as they are not being used.

public void ConfigureServices(IServiceCollection services)  
{
    services.AddMvc();

    services.AddWebMarkupMin(
        options =>
        {
            options.AllowMinificationInDevelopmentEnvironment = true;
            options.AllowCompressionInDevelopmentEnvironment = true;
        })
        .AddHtmlMinification(
            options =>
            {
                options.MinificationSettings.RemoveRedundantAttributes = true;
                options.MinificationSettings.RemoveHttpProtocolFromAttributes = true;
                options.MinificationSettings.RemoveHttpsProtocolFromAttributes = true;
            })
        .AddHttpCompression();
}

The services allow you to set a plethora of options in each case. I have chosen to enable minification and compression in development (rather than only in production), and have enabled a number of additional HTML minification options. These options will remove attributes from HTML elements where they are not required (e.g. the type="text" attribute on an input) and will strip the protocol from uri based attributes.

There are a whole host of additional options you can specify to control the HTML minification, all of which have excellent documentation on the GitHub wiki. Here you can control any number of additional parameters, such as the level of whitespace removal, preserving custom elements for e.g. Angular templates, or defining required HTML comments for e.g. Knockout containerless binding directives.

With your services and middleware in place, you can run your application and see the sweet gains!

The image below shows the result of loading the xbox home page before and after adding WebMarkupMin to an ASP.NET Core application. I used the built-in network throttle in Chrome Dev Tools, set to DSL (5ms latency, 2 Mbit/s download speed), to emulate a realistic network and compared the results before and after:

HTML minification using WebMarkupMin in ASP.NET Core

As you can see, adding HTML minification and compression at runtime has a noticeable latency cost, but this is more than compensated for by the reduced size of the data transfer, giving an 80% reduction in total download time.

Summary

In this post I explained the motivation for HTML minification and showed the reduction in file size that could be achieved through HTML compression, with or without additional GZIP HTTP compression.

I described the options for using HTML minification, be that at publish or runtime, and presented tools to achieve both.

Finally I demonstrated how you could use WebMarkupMin in your ASP.NET Core application to enable HTML minification and HTTP compression, and showed the improvement in download time that it gives on a relatively large HTML file.

I hope you found the post useful, as usual you can find the source code for this and my other posts on GitHub at https://github.com/andrewlock/blog-examples. If you know of any other useful tools, do let me know in the comments. Thanks!


Dominick Baier: New in IdentityServer4: Default Scopes

Another small thing people have been asking for.

The scope parameter is optional in OAuth 2 – but we made the decision that clients always have to explicitly ask for the scopes they want to access.

We relaxed this requirement a bit in IdentityServer4. At the token endpoint, scope is now optional (in other words, for client credentials, resource owner and extension grant requests). If no scope is specified, the client will automatically get a token that contains all explicitly allowed scopes (that’s a per-client setting).

This makes it easier, especially for server-to-server communication, to provision new APIs without having to change the token requests in the clients.
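
As a rough sketch, a client credentials token request that simply omits the scope parameter might look like this (the token endpoint address, client id and secret below are placeholders):

using System.Collections.Generic;
using System.Net.Http;
using System.Threading.Tasks;

public static class TokenRequestExample
{
    public static async Task<string> RequestTokenAsync()
    {
        using (var client = new HttpClient())
        {
            // Note: no "scope" value is sent. The issued token will contain
            // all scopes explicitly allowed for this client.
            var form = new FormUrlEncodedContent(new Dictionary<string, string>
            {
                ["grant_type"] = "client_credentials",
                ["client_id"] = "machine.client",
                ["client_secret"] = "secret"
            });

            var response = await client.PostAsync(
                "https://identityserver.example/connect/token", form);

            response.EnsureSuccessStatusCode();
            return await response.Content.ReadAsStringAsync();
        }
    }
}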

Endpoint documentation here – Client settings here.

 


Filed under: .NET Security, ASP.NET, IdentityServer, OAuth, OpenID Connect, WebAPI


Dominick Baier: Identity & Access Control for ASP.NET Core Deep Dive

Once a year Brock and I do our three day version of the Identity & Access Control workshop in London.

This year it will be all about .NET Core and ASP.NET Core – and a full day on the new IdentityModel2 & IdentityServer4.

You can find the details and sign-up here – and there is an early bird ’til the 23rd September.

Really looking forward to this, since the extra day gives us so much more time for labs and going even deeper on the mechanics and architecture of modern identity and applications.

See you there!


Filed under: .NET Security, ASP.NET, IdentityModel, IdentityServer, OAuth, OpenID Connect, WebAPI


Dominick Baier: New in IdentityServer4: Clients without Secrets

Over the next weeks I will do short blog posts about new features in IdentityServer4. The primary intention is to highlight a new feature and then defer to our docs for the details (which will also force me to write some proper docs).

Clients without secrets
Many people asked for this. The OAuth 2 token endpoint does not require authentication for so-called “public clients”. We always ignored that and mandated some sort of secret (while not treating it as a real secret for public clients).

In IdentityServer4 there is a new RequireClientSecret flag on the Client class where you can enable/disable the client secret requirement.
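
As an illustrative sketch (the client id, grant type, redirect URI and scopes below are placeholders, not taken from the docs), the relevant part of a public client definition might look like this:

new Client
{
    ClientId = "native.client",
    AllowedGrantTypes = GrantTypes.Code,
    // Public client: the token endpoint will not require client authentication.
    RequireClientSecret = false,
    RedirectUris = new List<string> { "myapp://callback" },
    AllowedScopes = new List<string> { "openid", "api1" }
};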

You can read about client settings here, and about secrets in general here.


Filed under: IdentityServer, OAuth, OpenID Connect, WebAPI


Andrew Lock: Configuring environment specific services in ASP.NET Core - Part 2

Configuring environment specific services in ASP.NET Core - Part 2

In my previous post, I showed how you could configure different services for dependency injection depending on the current hosting environment, i.e. whether you are currently running in Development or Production.

The approach I demonstrated required storing the IHostingEnvironment variable passed to the constructor of your Startup class, for use in the ConfigureServices method.

In this post I show an alternative approach, in which you use environment-specific methods on your startup class, rather than if-else statements in ConfigureServices.

The default Startup class

By convention, a standard ASP.NET Core application uses a Startup class for application setting configuration, setting up dependency injection, and defining the middleware pipeline. If you use the default MVC template when creating your project, this will produce a Startup class with a signature similar to the following:

public class Startup  
{
  public Startup(IHostingEnvironment env)
  {
    // Configuration settings
  }

  public void ConfigureServices(IServiceCollection services)
  {
    // Service dependency injection configuration
  }

  public void Configure(IApplicationBuilder app)
  {
    /// Middleware configuration
  }
}

Note that the Startup class does not implement any particular interface, or inherit from a base class. Instead, it is a simple class, which follows naming conventions for the configuration methods.

As a reminder, this class is referenced as part of the WebHostBuilder setup, which typically resides in Program.cs:

var host = new WebHostBuilder()  
    .UseKestrel()
    .UseContentRoot(Directory.GetCurrentDirectory())
    .UseStartup<Startup>()
    .Build();

host.Run();  

In addition to this standard format, there are some supplementary conventions that are particularly useful in the scenarios I described in my last post, where you want a different service injected depending on the runtime hosting environment. I'll come to the conventions shortly, but for now consider the following scenario.

I have an ISmsService my application uses to send SMS messages. In production I will need to use the full implementation, but when in development I don't want an SMS to be sent every time I test it, especially as it costs me money each time I use it. Instead, I need to use a dummy implementation of ISmsService.

Extension methods

In the previous post, I showed how you can easily use an if-else construct in your ConfigureServices method to meet these requirements. However, if you have a lot of these dummy services that need to be wired up, the method could quickly become long and confusing, especially in large apps with a lot of dependencies.

This can be mitigated to an extent if you use the extension method approach for configuring your internal services, similar to that suggested by K. Scott Allen in his post. In his post, he suggests wrapping each segment of configuration in an extension method, to keep the Startup.ConfigureServices method simple and declarative.

Considering the SMS example above, we might construct an extension method that takes in the hosting environment, and configures all the ancillary services. For example:

public static class SmsServiceExtensions  
{
  public static IServiceCollection AddSmsService(this IServiceCollection services, IHostingEnvironment env, IConfigurationRoot config)
  {
    services.Configure<SmsSettings>(config.GetSection("SmsSettings"));
    services.AddSingleton<ISmsTemplateFactory, SmsTemplateFactory>();
    if(env.IsDevelopment())
    {
      services.AddTransient<ISmsService, DummySmsService>();
    }
    else
    {
      services.AddTransient<ISmsService, SmsService>();
    }
    return services;
  }
}

These extension methods encapsulate a discrete unit of configuration, all of which would otherwise have resided in the Startup.ConfigureServices method, leaving your ConfigureServices method far easier to read:

public void ConfigureServices(IServiceCollection services)  
{
  services.AddMVC();
  services.AddSmsService(Environment, Configuration);
}

The downside to this approach is that your service configuration is now spread across many different classes and methods. Some people will prefer to have all the configuration code in the Startup.cs file, but still want to avoid the many if-else constructs for configuring Development vs Production dependencies.

Luckily, there is another approach at your disposal, by way of environment-specific Configure methods.

Environment-Specific method conventions

The ASP.NET Core WebHostBuilder has a number of conventions it follows when locating the configuration methods on the Startup class.

As we've seen, the Configure and ConfigureServices methods are used by default. The main advantage of not requiring an explicit interface implementation is that the WebHostBuilder can inject additional dependencies into these methods. However it also enables the selection of different methods depending on context.

As described in the documentation, the Startup class can contain environment specific configuration methods of the form Configure{EnvironmentName}() and Configure{EnvironmentName}Services().

If the WebHostBuilder detects methods of this form, they will be called preferentially to the standard Configure and ConfigureServices methods. We can use this to avoid the proliferation of if-else in our startup class. For example, considering the SMS configuration previously:

public class Startup  
{
  public void ConfigureServices(IServiceCollection services)
    {
        ConfigureCommonServices(services);
        services.AddTransient<ISmsService, SmsService>();
    }

    // This method gets called by the runtime. Use this method to add services to the container.
    public void ConfigureDevelopmentServices(IServiceCollection services)
    {
        ConfigureCommonServices(services);
        services.AddTransient<ISmsService, DummySmsService>();
    }

    private void ConfigureCommonServices(IServiceCollection services)
    {
        services.Configure<SmsSettings>(config.GetSection("SmsSettings"));
        services.AddSingleton<ISmsTemplateFactory, SmsTemplateFactory>();
    }
}

With this approach, we can just configure our alternative implementation services in the appropriate methods. At runtime the WebHostBuilder will check for the presence of a Configure{EnvironmentName}Services method.

When running in Development, ConfigureDevelopmentServices will be selected and the DummySmsService will be used. In any other environment, the default ConfigureServices will be called.

Note that we add all the service configuration that is common between environments to a private method ConfigureCommonServices, which is called by both configure methods. This prevents fragile duplication of configuration for services common between environments.

Environment-Specific class conventions

As well as the convention based methods in Startup, you can also take a convention-based approach for the whole Startup class. This allows you to completely separate your configuration code when in Development from other environments, by creating classes of the form Startup{Environment}.

For example, you can create a StartupDevelopment class and a Startup class - when you run in the Development environment, StartupDevelopment will be used for configuring your app and services. In other environments, Startup will be used.

So, for example, we could have the following Startup class:

public class Startup  
{
  public Startup(IHostingEnvironment env)
  {
    // Configuration settings
  }

  public void ConfigureServices(IServiceCollection services)
  {
    // Service dependency injection configuration
    services.AddTransient<ISmsService, SmsService>();
  }

  public void Configure(IApplicationBuilder app)
  {
    /// Middleware configuration
  }
}

and an environment-specific version for the Development environment:

public class StartupDevelopment  
{
  public StartupDevelopment(IHostingEnvironment env)
  {
    // Configuration settings
  }

  public void ConfigureServices(IServiceCollection services)
  {
    // Service dependency injection configuration
    services.AddTransient<ISmsService, DummySmsService>();
  }

  public void Configure(IApplicationBuilder app)
  {
    /// Middleware configuration
  }
}

In order to use this convention based approached to your startup class, you need to use a different overload in the WebHostBuilder:

var assemblyName = typeof(Startup).GetTypeInfo().Assembly.FullName;

var host = new WebHostBuilder()  
    .UseKestrel()
    .UseContentRoot(Directory.GetCurrentDirectory())
    .UseStartup(assemblyName)
    .Build();

host.Run();  

Rather than using the generic UseStartup<T> method, we need to use the overload UseStartup(string startupAssemblyName). Under the hood, the WebHostBuilder will use reflection to find a Startup class in the provided assembly called Startup or Startup{Environment}. The environment-specific class will be used by default, falling back to the Startup class if no environment-specific version is found. If no candidate classes are found, the builder will throw an InvalidOperationException when starting your application.

Bear in mind that if you use this approach, you will need to duplicate any configuration that is common between environments, including application settings, service configuration and the middleware pipeline. If your application runs significantly differently between Development and Production then this approach may work best for you.

In my experience, the majority of the application configuration is common between all environments, with the exception of a handful of environment-specific services and middleware. Generally I prefer the if-else approach with encapsulation via extension methods as the application grows, but it is generally down to personal preference, and what works best for you.

Summary

In a previous post, I showed how you could use IHostingEnvironment to control which services are registered with the DI container at runtime, depending on the hosting environment.

In this post, I showed how you could achieve a similar result using naming conventions baked in to the WebHostBuilder implementation.

These conventions allow automatic selection of Configure{EnvironmentName} and Configure{Environment}Services methods in your Startup class depending on the current hosting environment.

Additionally, I showed the convention based approach to Startup class selection, whereby your application will automatically select a Startup class of the form Startup{Environment} if available.


Damien Bowden: ASP.NET Core Action Arguments Validation using an ActionFilter

This article shows how to use an ActionFilter to validate the model from a HTTP POST request in an ASP.NET Core MVC application.

Code: https://github.com/damienbod/Angular2AutoSaveCommands

Other articles in this series:

  1. Implementing UNDO, REDO in ASP.NET Core
  2. Angular 2 Auto Save, Undo and Redo
  3. ASP.NET Core Action Arguments Validation using an ActionFilter

In an ASP.NET Core MVC application, custom validation logic can be implemented in an ActionFilter. Because the ActionFilter is processed after model binding in the action execution, the model and action parameters can be used in an ActionFilter without having to read from the request body or the URL.

The model can be accessed using the context.ActionArguments dictionary. The key for the property has to match the parameter name in the MVC controller action method. Ryan Nowak also explained in this issue that context.ActionDescriptor.Parameters can be used to access the request payload data.

If the model is invalid, the context status code is set to 400 (bad request) and the reason is added to the context result using a ContentResult object. The request is then no longer processed but 'short-circuited', to use the terminology from the ASP.NET Core documentation.

using System;
using System.IO;
using System.Text;
using Angular2AutoSaveCommands.Models;
using Microsoft.AspNetCore.Mvc;
using Microsoft.AspNetCore.Mvc.Filters;
using Microsoft.Extensions.Logging;

namespace Angular2AutoSaveCommands.ActionFilters
{
    public class ValidateCommandDtoFilter : ActionFilterAttribute
    {
        private readonly ILogger _logger;

        public ValidateCommandDtoFilter(ILoggerFactory loggerFactory)
        {
            _logger = loggerFactory.CreateLogger("ValidatePayloadTypeFilter");
        }

        public override void OnActionExecuting(ActionExecutingContext context)
        {
            var commandDto = context.ActionArguments["commandDto"] as CommandDto;
            if (commandDto == null)
            {
                context.HttpContext.Response.StatusCode = 400;
                context.Result = new ContentResult()
                {
                    Content = "The body is not a CommandDto type"
                };
                return;
            }

            _logger.LogDebug("validating CommandType");
            if (!CommandTypes.AllowedTypes.Contains(commandDto.CommandType))
            {
                context.HttpContext.Response.StatusCode = 400;
                context.Result = new ContentResult()
                {
                    Content = "CommandTypes not allowed"
                };
                return;
            }

            _logger.LogDebug("validating PayloadType");
            if (!PayloadTypes.AllowedTypes.Contains(commandDto.PayloadType))
            {
                context.HttpContext.Response.StatusCode = 400;
                context.Result = new ContentResult()
                {
                    Content = "PayloadType not allowed"
                };
                return;
            }

            base.OnActionExecuting(context);
        }
    }
}

The ActionFilter is added to the services in the Startup class. This is not needed if the ActionFilter is used directly in the MVC Controller.

services.AddScoped<ValidateCommandDtoFilter>();

The filter can then be used in the MVC Controller using the ServiceFilter attribute. If the commandDto model is invalid, a BadRequest response is returned without executing the business logic in the action method.

[ServiceFilter(typeof(ValidateCommandDtoFilter))]
[HttpPost]
[Route("Execute")]
public IActionResult Post([FromBody]CommandDto commandDto)
{
    _commandHandler.Execute(commandDto);
    return Ok(commandDto);
}
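
If you would rather not register the filter with the DI container, the TypeFilter attribute can construct it for you and resolve its constructor dependencies (such as ILoggerFactory) from the container. A minimal sketch of the same action using this alternative (not from the original post):

[TypeFilter(typeof(ValidateCommandDtoFilter))]
[HttpPost]
[Route("Execute")]
public IActionResult Post([FromBody]CommandDto commandDto)
{
    // the filter is created per request by TypeFilter, so no AddScoped registration is needed
    _commandHandler.Execute(commandDto);
    return Ok(commandDto);
}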

Links

https://docs.asp.net/en/latest/mvc/controllers/filters.html

https://github.com/aspnet/Mvc/issues/5260#issuecomment-245936046

https://github.com/aspnet/Mvc/blob/dev/src/Microsoft.AspNetCore.Mvc.Abstractions/Filters/ActionExecutingContext.cs



Damien Bowden: Angular 2 Auto Save, Undo and Redo

This article shows how to implement auto save, Undo and Redo commands in an Angular 2 SPA. The Undo and Redo commands work for the whole application and not just for single components. The Angular 2 app uses an ASP.NET Core service implemented in the previous blog post.

Code: https://github.com/damienbod/Angular2AutoSaveCommands

2016.08.19 Updated to Angular 2 release, ASP.NET Core 1.0.1

Other articles in this series:

  1. Implementing UNDO, REDO in ASP.NET Core
  2. Angular 2 Auto Save, Undo and Redo
  3. ASP.NET Core Action Arguments Validation using an ActionFilter

The CommandDto class is used for all create, update and delete HTTP requests to the server. This class is used in the different components, so the payload is always different. The CommandType defines the type of command to be executed; the possible values supported by the server are ADD, UPDATE, DELETE, UNDO and REDO. The PayloadType defines the type of object used in the Payload and is used by the server to convert the Payload object to the corresponding C# class. The ActualClientRoute is used for the Undo and Redo functions: when an Undo or Redo command is executed, the next client route is returned in the CommandDto response. As this is an Angular 2 application, the Angular 2 routing value is used.

export class CommandDto {
    constructor(commandType: string,
        payloadType: string,
        payload: any,
        actualClientRoute: string) {

        this.CommandType = commandType;
        this.PayloadType = payloadType;
        this.Payload = payload;
        this.ActualClientRoute = actualClientRoute;
    }

    CommandType: string;
    PayloadType: string;
    Payload: any;
    ActualClientRoute: string;
}

The CommandService is used to access the ASP.NET Core API implemented in the CommandController class. The service implements the Execute, Undo and Redo HTTP POST requests to the server using the CommandDto as the body. The service also implements an EventEmitter output which can be used to update child components, if an Undo command or a Redo command has been executed. When the function UndoRedoUpdate is called, the event is sent to all listeners.

import { Injectable, EventEmitter, Output } from '@angular/core';
import { Http, Response, Headers } from '@angular/http';
import 'rxjs/add/operator/map'
import { Observable } from 'rxjs/Observable';
import { Configuration } from '../app.constants';
import { CommandDto } from './CommandDto';

@Injectable()
export class CommandService {

    @Output() OnUndoRedo = new EventEmitter<string>();

    private actionUrl: string;
    private headers: Headers;

    constructor(private _http: Http, private _configuration: Configuration) {

        this.actionUrl = `${_configuration.Server}api/command/`;

        this.headers = new Headers();
        this.headers.append('Content-Type', 'application/json');
        this.headers.append('Accept', 'application/json');
    }

    public Execute = (command: CommandDto): Observable<CommandDto> => {
        let url = `${this.actionUrl}execute`;
        return this._http.post(url, command, { headers: this.headers }).map(res => res.json());
    }

    public Undo = (): Observable<CommandDto> => {
        let url = `${this.actionUrl}undo`;
        return this._http.post(url, '', { headers: this.headers }).map(res => res.json());
    }

    public Redo = (): Observable<CommandDto> => {
        let url = `${this.actionUrl}redo`;
        return this._http.post(url, '', { headers: this.headers }).map(res => res.json());
    }

    public GetAll = (): Observable<any> => {
        return this._http.get(this.actionUrl).map((response: Response) => <any>response.json());
    }
    
    public UndoRedoUpdate = (payloadType: string) => {
        this.OnUndoRedo.emit(payloadType);
    }
}

The app.component implements the Undo and the Redo user interface.

<div class="container" style="margin-top: 15px;">

    <nav class="navbar navbar-inverse">
        <div class="container-fluid">
            <div class="navbar-header">
                <a class="navbar-brand" [routerLink]="['/commands']">Commands</a>
            </div>
            <ul class="nav navbar-nav">
                <li><a [routerLink]="['/home']">Home</a></li>
                <li><a [routerLink]="['/about']">About</a></li>
                <li><a [routerLink]="['/httprequests']">HTTP API Requests</a></li>
            </ul>
            <ul class="nav navbar-nav navbar-right">
                <li><a (click)="Undo()">Undo</a></li>
                <li><a (click)="Redo()">Redo</a></li>
                <li><a href="https://twitter.com/damien_bod"><img src="assets/damienbod.jpg" height="40" style="margin-top: -10px;" /></a></li>               

            </ul>
        </div>
    </nav>

    <router-outlet></router-outlet>

    <footer>
        <p>
            <a href="https://twitter.com/damien_bod">twitter(damienbod)</a>&nbsp; <a href="https://damienbod.com/">damienbod.com</a>
            &copy; 2016
        </p>
    </footer>
</div>

The Undo method uses the _commandService to execute an Undo HTTP POST request. If successful, the UndoRedoUpdate function from the _commandService is executed, which broadcasts an update event in the client app, and then the application navigates to the route returned in the Undo commandDto response using the ActualClientRoute.

import { Component, OnInit } from '@angular/core';
import { Router } from '@angular/router';
import { CommandService } from './services/commandService';
import { CommandDto } from './services/commandDto';

@Component({
    selector: 'my-app',
    template: require('./app.component.html'),
    styles: [require('./app.component.scss'), require('../style/app.scss')]
})

export class AppComponent {

    constructor(private router: Router, private _commandService: CommandService) {
    }

    public Undo() {
        let resultCommand: CommandDto;

        this._commandService.Undo()
            .subscribe(
                data => resultCommand = data,
                error => console.log(error),
                () => {
                    this._commandService.UndoRedoUpdate(resultCommand.PayloadType);
                    this.router.navigate(['/' + resultCommand.ActualClientRoute]);
                }
            );
    }

    public Redo() {
        let resultCommand: CommandDto;

        this._commandService.Redo()
            .subscribe(
                data => resultCommand = data,
                error => console.log(error),
                () => {
                    this._commandService.UndoRedoUpdate(resultCommand.PayloadType);
                    this.router.navigate(['/' + resultCommand.ActualClientRoute]);
                }
            );
    }
}

The HomeComponent is used to implement the ADD, UPDATE and DELETE operations for the HomeData object. A simple form is used to add or update the different items, with auto save implemented on the input element using the keyup event. A list of the existing HomeData items is displayed in a table, from which each item can be updated or deleted.

<div class="container">
    <div class="col-lg-12">
        <h1>Selected Item: {{model.Id}}</h1>
        <form *ngIf="active" (ngSubmit)="onSubmit()" #homeItemForm="ngForm">

            <input type="hidden" class="form-control" id="id" [(ngModel)]="model.Id" name="id" #id="ngModel">
            <input type="hidden" class="form-control" id="deleted" [(ngModel)]="model.Deleted" name="deleted" #id="ngModel">

            <div class="form-group">
                <label for="name">Name</label>
                <input type="text" class="form-control" id="name" required  (keyup)="createCommand($event)" [(ngModel)]="model.Name" name="name" #name="ngModel">
                <div [hidden]="name.valid || name.pristine" class="alert alert-danger">
                    Name is required
                </div>
            </div>

            <button type="button" class="btn btn-default" (click)="newHomeData()">New Home</button>

        </form>
    </div>
</div>

<hr />

<div>

    <table class="table">
        <thead>
            <tr>
                <th>Id</th>
                <th>Name</th>
                <th></th>
                <th></th>
            </tr>
        </thead>
        <tbody>
            <tr style="height:20px;" *ngFor="let homeItem of HomeDataItems">
                <td>{{homeItem.Id}}</td>
                <td>{{homeItem.Name}}</td>
                <td>
                    <button class="btn btn-default" (click)="Edit(homeItem)">Edit</button>
                </td>
                <td>
                    <button class="btn btn-default" (click)="Delete(homeItem)">Delete</button>
                </td>
            </tr>
        </tbody>
    </table>

</div>

The HomeDataService is used to select all the HomeData items using the ASP.NET Core service implemented in the HomeController class.

import { Injectable } from '@angular/core';
import { Http, Response, Headers } from '@angular/http';
import 'rxjs/add/operator/map'
import { Observable } from 'rxjs/Observable';
import { Configuration } from '../app.constants';

@Injectable()
export class HomeDataService {

    private actionUrl: string;
    private headers: Headers;

    constructor(private _http: Http, private _configuration: Configuration) {

        this.actionUrl = `${_configuration.Server}api/home/`;

        this.headers = new Headers();
        this.headers.append('Content-Type', 'application/json');
        this.headers.append('Accept', 'application/json');
    }

    public GetAll = (): Observable<any> => {
        return this._http.get(this.actionUrl).map((response: Response) => <any>response.json());
    }
 
}

The HomeComponent implements the different CUD operations and also listens for the Undo and Redo events which are relevant to its display. When a keyup is received, createCommand is executed. This function adds the data to the keyDownEvents subject. A deboucedInput Observable is used together with debounceTime, so that a command is only sent to the server, via the onSubmit function, once the user has not entered any input for more than a second.

The component also subscribes to the OnUndoRedo event sent from the _commandService. When this event is received, OnUndoRedoRecieved is called. This function refreshes the table with the current data if the Undo or Redo command changed data displayed in this component.

import { Component, OnInit } from '@angular/core';
import { FormControl } from '@angular/forms';
import { Http } from '@angular/http';
import { HomeData } from './HomeData';
import { CommandService } from '../services/commandService';
import { CommandDto } from '../services/commandDto';
import { HomeDataService } from '../services/homeDataService';

import { Observable } from 'rxjs/Observable';
import { Subject } from 'rxjs/Subject';

import 'rxjs/add/observable/of';
import 'rxjs/add/observable/throw';

// Observable operators
import 'rxjs/add/operator/catch';
import 'rxjs/add/operator/debounceTime';
import 'rxjs/add/operator/distinctUntilChanged';
import 'rxjs/add/operator/do';
import 'rxjs/add/operator/filter';
import 'rxjs/add/operator/map';
import 'rxjs/add/operator/switchMap';

@Component({
    selector: 'homecomponent',
    template: require('./home.component.html')
})

export class HomeComponent implements OnInit {

    public message: string;
    public model: HomeData;
    public submitted: boolean;
    public active: boolean;
    public HomeDataItems: HomeData[];

    private deboucedInput: Observable<string>;
    private keyDownEvents = new Subject<string>();

    constructor(private _commandService: CommandService, private _homeDataService: HomeDataService) {
        this.message = "Hello from Home";
        this._commandService.OnUndoRedo.subscribe(item => this.OnUndoRedoRecieved(item));
    }

    ngOnInit() {
        this.model = new HomeData(0, 'name', false);
        this.submitted = false;
        this.active = true;
        this.GetHomeDataItems();

        this.deboucedInput = this.keyDownEvents;
        this.deboucedInput
            .debounceTime(1000)       
            .distinctUntilChanged()   
            .subscribe((filter: string) => {
                this.onSubmit();
            });
    }

    public GetHomeDataItems() {
        console.log('HomeComponent starting...');
        this._homeDataService.GetAll()
            .subscribe((data) => {
                this.HomeDataItems = data;
            },
            error => console.log(error),
            () => {
                console.log('HomeDataService:GetAll completed');
            }
        );
    }

    public Edit(aboutItem: HomeData) {
        this.model.Name = aboutItem.Name;
        this.model.Id = aboutItem.Id;
    }

    // TODO remove the get All request and update the list using the return item
    public Delete(homeItem: HomeData) {
        let myCommand = new CommandDto("DELETE", "HOME", homeItem, "home");

        console.log(myCommand);
        this._commandService.Execute(myCommand)
            .subscribe(
            data => this.GetHomeDataItems(),
            error => console.log(error),
            () => {
                if (this.model.Id === homeItem.Id) {
                    this.newHomeData();
                }
            }   
            );
    }

    public createCommand(evt: any) {
        this.keyDownEvents.next(this.model.Name);
    }

    // TODO remove the get All request and update the list using the return item
    public onSubmit() {
        if (this.model.Name != "") {
            this.submitted = true;
            let myCommand = new CommandDto("ADD", "HOME", this.model, "home");

            if (this.model.Id > 0) {
                myCommand.CommandType = "UPDATE";
            }

            console.log(myCommand);
            this._commandService.Execute(myCommand)
                .subscribe(
                data => {
                    this.model.Id = data.Payload.Id;
                    this.GetHomeDataItems();
                },
                error => console.log(error),
                () => console.log('Command executed')
                );
        }       
    }

    public newHomeData() {
        this.model = new HomeData(0, 'add a new name', false);
        this.active = false;
        setTimeout(() => this.active = true, 0);
    }

    private OnUndoRedoRecieved(payloadType) {
        if (payloadType === "HOME") {
            this.GetHomeDataItems();
           // this.newHomeData();
            console.log("OnUndoRedoRecieved Home");
            console.log(payloadType);
        }       
    }
}

When the application is built (both server and client) and started, the items can be added, updated or deleted using the commands.

angular2autosaveundoredo_01

The executed commands can be viewed using the commands tab in the Angular 2 application.

angular2autosaveundoredo_03

And the commands or the data can also be viewed in the SQL database.

angular2autosaveundoredo_02

Links

http://blog.thoughtram.io/angular/2016/02/22/angular-2-change-detection-explained.html

https://angular.io/docs/ts/latest/guide/forms.html



Andrew Lock: Configuring environment specific services for dependency injection in ASP.NET Core

Configuring environment specific services for dependency injection in ASP.NET Core

In this short post I show how you can configure dependency injection so that different services will be injected depending on whether you are in a development or production environment.

tl;dr - save the IHostingEnvironment injected in your Startup constructor for use in your ConfigureServices method.

Why would you want to?

There are any number of reasons why you might want to do this, but fundamentally it's because you want things to work differently in production than in development. For example, when you're running and testing in a development environment:

  • You probably don't want emails to be sent to customers
  • You might not want to rely on an external service
  • You might not want to use live authentication details for an external service
  • You might not want to have to use 2FA every time you log in to your app.

Many of these issues can be handled with simple configuration - for example you may point to a local development mail server instead of the production mail server for email. This is simple to do with the new configuration system and is a great option in many cases.
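
As a hedged sketch of the configuration-only approach (the "Smtp:Host" setting name is made up for illustration), the application code stays the same and only the configured value changes per environment:

// Sketch only - "Smtp:Host" is a hypothetical setting name read from IConfiguration.
// appsettings.Development.json can point it at a local dev mail server,
// while appsettings.Production.json points at the real one; no code change is needed.
var smtpHost = Configuration["Smtp:Host"];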

However, sometimes configuration just can't handle everything you need. For example, say you call an external API to retrieve currency rates. If that API costs money, you obviously don't want to be calling it in development. However, you can't necessarily just use configuration to point to a different endpoint - you would need an alternative endpoint that delivers data in the same format, which is likely not available to you.

Instead, a better way to handle the issue is to put a facade around the external API call and create two separate implementations - one that uses the external API, the other that just returns some dummy data. In production you can use the live API, while in development you can use the dummy service, without having to worry about the external service at all.

For example, you might create code similar to the following:

public interface ICurrencyRateService  
{
    ICollection<CurrencyRate> GetCurrencyRates();
}

public class ExternalCurrencyRateService : ICurrencyRateService  
{
    private readonly IExternalService _service;
    public ExternalCurrencyRateService(IExternalService service)
    {
        _service = service;
    }

    public ICollection<CurrencyRate> GetCurrencyRates()
    {
        return _service.GetRates();
    }
}

public class DummyCurrencyRateService : ICurrencyRateService  
{
    public ICollection<CurrencyRate> GetCurrencyRates()
    {
        return new [] {
            new CurrencyRate {
                Currency = "GBP",
                Rate = 1.00
            },
            new CurrencyRate {
                Currency = "USD",
                Rate = 1.31
            }
            // ...more currencies
        };
    }
}

In these classes we define an interface, a live implementation which uses the IExternalService, and a dummy service which just returns back some dummy data.

A first attempt

In my first attempt to hook these two services up, I tried just injecting the IHostingEnvironment directly into the ConfigureServices call in my Startup class, like so:

public void ConfigureServices(IServiceCollection services, IHostingEnvironment env)  
{
    // Add required services.
}

Many methods in the ASP.NET Core framework allow this kind of dependency injection at the method level. For example you can inject services into the Startup.Configure method when configuring your app, or into the Invoke method when creating custom middleware. Unfortunately in this case, the method must be exactly as described, otherwise your app will crash on startup with the following error:

Unhandled Exception: System.InvalidOperationException: The ConfigureServices method  
must either be parameterless or take only one parameter of type IServiceCollection.  

Doh! This does kind of make sense: until you have configured the services by calling the method, you don't have a service provider to use to inject them!
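
For comparison, method-level injection does work once the container has been built. A minimal sketch, assuming ICurrencyRateService has already been registered in ConfigureServices:

public void Configure(IApplicationBuilder app, IHostingEnvironment env, ICurrencyRateService rateService)
{
    // rateService is resolved from the service provider that is created after
    // ConfigureServices has run - which is exactly why the same trick cannot
    // work for ConfigureServices itself.
}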

The right way

Luckily, we have a simple alternative. The Startup class itself may contain a constructor which accepts an instance of IHostingEnvironment. By convention, this constructor creates the IConfigurationRoot using a ConfigurationBuilder and saves it to a property on Startup called Configuration.

We can easily take a similar approach for IHostingEnvironment by saving it to a property on Startup, for use later in ConfigureServices. Our Startup class would look something like this:

using Microsoft.AspNetCore.Builder;  
using Microsoft.AspNetCore.Hosting;  
using Microsoft.Extensions.Configuration;  
using Microsoft.Extensions.DependencyInjection;  
using Microsoft.Extensions.Logging;

public class Startup  
{
    public Startup(IHostingEnvironment env)
    {
        Configuration = new ConfigurationBuilder()
            .SetBasePath(env.ContentRootPath)
            .AddJsonFile("appsettings.json", optional: true, reloadOnChange: true)
            .Build();

        HostingEnvironment = env;
    }

    public IConfigurationRoot Configuration { get; }
    public IHostingEnvironment HostingEnvironment { get; }

    public void ConfigureServices(IServiceCollection services)
    {
        if (HostingEnvironment.IsDevelopment())
        {
            services.AddTransient<ICurrencyRateService, DummyCurrencyRateService>();
        }
        else
        {
            services.AddTransient<ICurrencyRateService, ExternalCurrencyRateService>();
        }

        // other services
    }

    public void Configure(IApplicationBuilder app, IHostingEnvironment env, ILoggerFactory loggerFactory)
    {
            // middleware configuration
    }
}

As you can see, we simply save the injected IHostingEnvironment to the HostingEnvironment property in the constructor. Later, in the ConfigureServices method, we check the environment and register the appropriate ICurrencyRateService implementation.
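
The nice thing about switching implementations at registration time is that consumers are completely unaware of it. A minimal sketch with a hypothetical controller:

public class RatesController : Controller
{
    private readonly ICurrencyRateService _rateService;

    // DummyCurrencyRateService is injected in Development, ExternalCurrencyRateService otherwise
    public RatesController(ICurrencyRateService rateService)
    {
        _rateService = rateService;
    }

    public IActionResult Index()
    {
        return Ok(_rateService.GetCurrencyRates());
    }
}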

Summary

The technique shown above will not always be necessary, and generally speaking configuration will probably be a simpler and more intuitive route to handling different behaviour based on the environment. However, the dependency injection container is also a great place to switch out your services. If you are using a third party container there are often other ways of achieving the same effect with their native APIs, for example the profiles feature in StructureMap.


Dominick Baier: IdentityServer4 RC1

Wow – we’re done! Brock and I spent the last two weeks 14h/day refactoring, polishing, testing and refining IdentityServer for ASP.NET Core…and I must say it’s the best STS we’ve written so far…

We kept the same approach as before, that IdentityServer takes care of all the hard things like protocol handling, validation, token generation, data management and security – while you only need to model your application architecture via scopes, clients and users. But at the same time we give you much more flexibility for handling custom scenarios, workflows and user interactions. We also made it easier to get started.

There are too many new features to talk about all of them in this post – but to give you an overview:

  • integration in ASP.NET Core’s pipeline, DI system, configuration, logging and authentication handling
  • complete separation of protocol handling and UI thus allowing you to easily modify the UI in any way you want
  • simplified persistence layer
  • improved key material handling enabling automatic key rotation and remote signing scenarios
  • allowing multiple grant types per client
  • revamped support for extension grants and custom protocol responses
  • seamless integration into ASP.NET Core Identity (while retaining the ability to use arbitrary other data sources for your user management)
  • support for public clients (clients that don’t need a client secret to use the token endpoint)
  • support for default scopes when requesting tokens
  • support for ASP.NET Core authentication middleware for external authentication
  • improved session management and authentication cookie handling
  • revamped and improved support for CORS
  • re-worked middleware for JWT and reference token validation
  • tons of internal cleanup

We will have separate posts detailing those changes in the coming weeks.

Where to start?
Our new website https://identityserver.io will bring you to all the relevant sites: documentation, github repo and our new website for commercial support options.

Add the IdentityServer package to your project.json:

"IdentityServer4": "1.0.0-rc1"

and start coding ;)
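
For orientation, the wiring in Startup looks roughly like the quickstarts below. The exact builder extension methods changed between RC builds, so treat this as a sketch rather than the definitive API:

public void ConfigureServices(IServiceCollection services)
{
    // in-memory configuration stores as used in the quickstarts; signing credential setup omitted
    services.AddIdentityServer()
        .AddInMemoryClients(Clients.Get())
        .AddInMemoryScopes(Scopes.Get())
        .AddInMemoryUsers(Users.Get());
}

public void Configure(IApplicationBuilder app)
{
    app.UseIdentityServer();
}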

We also added a number of quickstart tutorials that walk you through common scenarios.

Everything is still work in progress, but we have the feeling we are really close to how we want the final code to look and feel.

Give it a try – and give us feedback on the issue tracker. Release notes can be found here.

Have fun!


Filed under: .NET Security, ASP.NET, IdentityServer, OAuth, OpenID Connect, WebAPI


Andrew Lock: An introduction to OpenID Connect in ASP.NET Core

An introduction to OpenID Connect in ASP.NET Core

This post is the next in a series of posts on authentication in ASP.NET Core. In the previous post we showed how you can use the OAuth 2.0 protocol to provide 'Login via Facebook' functionality to your website.

While a common approach, there are a number of issues with using OAuth as an authentication protocol, rather than the authorisation protocol it was designed to be.

OpenID Connect adds an additional layer on top of the OAuth protocol that solves a number of these problems. In this post we take a look at the differences between OpenID Connect and OAuth, how to use OpenID Connect in your ASP.NET Core application, and how to register your application with an OpenID Connect provider (in this case, Google).

What is OpenID Connect?

OpenID Connect is a simple identity layer that works over the top of OAuth 2.0. It uses the same underlying REST protocol, but adds consistency and additional security on top of the OAuth protocol.

It is also worth noting that OpenID Connect is a very different protocol to OpenID. The latter was an XML-based protocol which followed similar approaches and goals to OpenID Connect, but in a less developer-friendly way.

Why use it instead of OAuth 2.0?

In my recent post I showed how you could use OAuth 2.0 to login with Facebook on your ASP.NET Core application. You may be thinking 'why do I need another identity layer, OAuth 2.0 works perfectly well?'. Unfortunately there are a few problems with OAuth 2.0 as an authentication mechanism.

First of all, OAuth 2.0 is fundamentally an authorisation protocol, not an authentication protocol. Its entire design is based around providing access to some protected resource (e.g. Facebook profile, or photos) to a third party (e.g. your ASP.NET Core application).

When you 'Login with Facebook' we are doing a pseudo-authentication, by proving that you can provide access to the protected resource. Nat Sakimura explains it brilliantly on his blog, when he says using OAuth for authentication is like giving someone a valet key to your house. By being able to produce a key to your house, the website is able to assume that you are a given person, but you haven't really been properly authenticated as such. Also, that website now has a key to your house! That latter point is one of the major security concerns around OAuth 2.0 - there are various mitigations in place, but they don't address the fundamental concern.

OpenID Connect handles this issue in OAuth 2.0 by essentially only providing a key to a locker that contains your identity proof. Rather than granting access to your whole house, the locker is all you can get to.

Secondly, OAuth 2.0 is very loose in its requirements for implementation. The specification sets a number of technical details, but there are many subtly different implementations across various providers. Just take a look at the number of providers available in the AspNet.Security.OAuth.Providers repository to get a feel for it. Each of those providers requires some degree of customisation aside from specifying URLs and secrets. Each one returns data in a different format and requires the returned claims to be parsed. OpenID Connect is far more rigid in its requirements, which allows a great deal of interoperability.

Finally, OpenID Connect provides additional features that enhance security such as signing of web tokens and verification that a given token was assigned to your application. It also has a discovery protocol which allows your website to dynamically register with a new OpenID Connect Provider, without having to explicitly pre-register your application with them.

Where it is available, it really seems like the best advice is to always choose OpenID Connect over plain OAuth. Indeed, Dominick Baier, of Identity Server fame (among other things), says pretty much this on his blog:

...we always saw OpenID Connect as a “super-set” of OAuth 2.0 and always recommended against using OAuth without the OIDC parts.

The Flow

In terms of the protocol flow between the user, your ASP.NET application and the identity provider when using OpenID Connect, it is essentially the same as the OAuth 2.0 flow I outlined in the previous article on OAuth 2.0. As mentioned previously, OpenID Connect builds on top of OAuth 2.0, so it probably shouldn't be that surprising!

An introduction to OpenID Connect in ASP.NET Core

As before there are multiple possible flows depending on your application type (e.g. mobile app, website, single page application etc), but the standard website flow is essentially identical to OAuth 2.0. This version typically still requires you to register your application with the provider before adding it to your website, but allows automatic configuration of the endpoint URLs in your website through a service discovery protocol. You just need to set the domain (Authority in spec parlance) at which the configuration can be found, and your application can set everything else up for you.

Under the covers there are some subtle differences in the data being sent back and forth between your application and the authorisation servers, but this is largely hidden from you as a consuming developer. The scope parameter has an additional openid value to indicate that it is an OpenID Connect request, and the ACCESS_CODE response contains an id_token which is used to verify the integrity of the data. Finally, the request to the resource server to fetch any additional claims returns claims in a standardised way, using preset claim keys such as given_name, family_name and email. This spares you the implementation-specific mapping of claims that is necessary with OAuth 2.0.
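
As a rough illustration of that last point, once the user is signed in the standardised claims can be read from the ClaimsPrincipal without any provider-specific mapping (a sketch; the claim type names can differ if the default JWT claim-type mapping is left enabled):

// inside a controller action, after the OpenID Connect sign-in has completed
var givenName = User.FindFirst("given_name")?.Value;
var familyName = User.FindFirst("family_name")?.Value;
var email = User.FindFirst("email")?.Value;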

Adding OpenID Connect to your application

Hopefully by now you are convinced of the benefits OpenID Connect can provide, so let's look at adding it to an ASP.NET Core project.

As before, I'll assume you have an ASP.NET Core project, built using the default 'Individual user accounts' MVC template.

The first thing is to add the OpenID Connect package to your project.json:

{
  "dependencies": {
    "Microsoft.AspNetCore.Authentication.OpenIdConnect": "1.0.0"
  }
}

and configure the middleware in your Startup.Configure method:

public void Configure(IApplicationBuilder app, IHostingEnvironment env)  
{

    app.UseStaticFiles();

    app.UseIdentity();

    app.UseOpenIdConnectAuthentication(new OpenIdConnectOptions
        {
            ClientId = Configuration["ClientId"],
            ClientSecret = Configuration["ClientSecret"],
            Authority = Configuration["Authority"],
            ResponseType = OpenIdConnectResponseType.Code,
            GetClaimsFromUserInfoEndpoint = true
        });

    app.UseMvc(routes =>
    {
        routes.MapRoute(
            name: "default",
            template: "{controller=Home}/{action=Index}/{id?}");
    });
}

We created a new OpenIdConnectOptions object, added the ClientId and ClientSecret we received when registering our app with the OpenID Connect provider (more on that below), and specified the Authority which indicates the actual OpenID Connect provider we are using. As usual, we loaded these values from configuration, which should be stored in the user secrets manager when developing.

For the remainder of the article I'll assume you are configuring Google as your provider, so in this case the 'Authority' value would be "https://accounts.google.com". With the middleware in place, we have everything we need for a basic 'Login via Google' OpenID Connect implementation.

When the user gets to the login page, they will see the option to login using 'OpenIdConnect'. Obviously in production you would probably want to update that to something more user-friendly!

An introduction to OpenID Connect in ASP.NET Core

The user is then presented with their usual Google login screen (if not already logged in) and asked to authorise your ASP.NET application:

An introduction to OpenID Connect in ASP.NET Core

Clicking 'Allow' will redirect the user back to your ASP.NET application with an AUTH_CODE. Your app can then communicate through the back channel to Google to authenticate the user and sign them in to your application.

Registering your application with Google

Just like when we were configuring Facebook to be an OAuth 2.0 provider for our application, we need to register our application with Google before we can use OpenID Connect.

The first step is to visit http://console.developers.google.com and sign up as a developer. Once you are logged in and configured, you can register your app. Click 'Project' and 'Create Project' from the top menu

An introduction to OpenID Connect in ASP.NET Core

You will need to give your application a name and agree to the terms and conditions:

An introduction to OpenID Connect in ASP.NET Core

Now you need to generate some credentials for your application so we can obtain the necessary CLIENTID and CLIENTSECRET. Click 'Credentials' in the left bar, and if necessary, select your project. You can then create credentials for your project. For an ASP.NET Core website you will want to select the OAuth client ID option:

An introduction to OpenID Connect in ASP.NET Core

Next, choose Web application from the available options, provide a name, and a redirect URI. This URI will be the domain at which your application will be deployed (in my case http://localhost:5000) followed by /signin-oidc (by default)

An introduction to OpenID Connect in ASP.NET Core

On clicking create, you will be presented with your CLIENTID and CLIENTSECRET. Simply store these in your user secrets and you're good to go!

Summary

In this post we saw how to add sign in using OpenID Connect to an ASP.NET Core application. We outlined the differences of the OpenID Connect protocol compared to OAuth 2.0 and highlighted the security and development benefits over plain OAuth. Finally, we showed how to register your application with Google to obtain your Client Id and Secret.


Ben Foster: Automatic post-registration sign-in with Identity Server

Identity Server is an open source framework that allows you to implement single sign-on and supports a number of modern authentication protocols such as OpenID Connect and OAuth2.

Identity Server was created by the guys at Thinktecture and has now become the Microsoft recommended approach for providing centralised authentication and access-control in ASP.NET.

A few months ago I started to investigate replacing our hand-rolled auth system with Identity Server. We had a number of services in our platform and were already making use of OAuth2 to authenticate client applications in our API. We were using a domain-level authentication cookie to share authenticated sessions between two of our apps, but as more services were introduced, each with their own set of authentication requirements, this was no longer a viable solution.

Registering Users

Identity Server does not perform user registration so the typical flow when registering users is:

  1. User registers on your web site (store user in DB)
  2. After registration user is redirected to Identity Server to sign in
  3. User is redirected back to your web site

Identity Server provides support for ASP.NET Identity and MembershipReboot, and if you're not using one of these frameworks, you can provide your own custom services.

In our system we wanted a slightly different flow, whereby our customers were not required to sign in again following registration:

  1. User registers on our marketing site
  2. User is automatically signed in to Identity Server
  3. User is redirected to our Dashboard and automatically signed in

Once the user is signed into Identity Server we can transparently sign the user into the Dashboard application by disabling the IdSrv consent screen. Here's our client configuration:

return new List<Client>
{
    new Client
    {
        ClientName = "Marketing",
        ClientId = "marketing",
        Enabled = true,
        Flow = Flows.Implicit,
        AccessTokenType = AccessTokenType.Reference,
        RedirectUris = new List<string>
        {
            "http://localhost:51962/",
        },
        AllowAccessToAllScopes = true,
        RequireConsent = false
    },
    new Client
    {
        ClientName = "Dashboard",
        ClientId = "dashboard",
        Enabled = true,
        Flow = Flows.Implicit, 
        AccessTokenType = AccessTokenType.Reference,
        RedirectUris = new List<string>
        {
            "http://localhost:49902/"
        },
        PostLogoutRedirectUris = new List<string>
        {
            "http://localhost:49902/"
        },
        AccessTokenLifetime = 36000, // 10 hours
        AllowAccessToAllScopes = true,
        RequireConsent = false
    }
}

Implementing automatic sign-in

To implement automatic sign-in we need to do the following:

  1. During registration generate a One-Time-Access-Code (OTAC) and store this against our new user along with an expiry date.
  2. Redirect the user to the Dashboard including the OTAC in the URL (if you want to sign-in to the same app you can skip this step).
  3. Authenticate the user (redirects to Identity Server) sending the OTAC in the acr_values parameter (more info).
  4. Identity Server validates the token and signs the user in transparently (no consent screen).
  5. User is redirected back to the dashboard.

Generating the OTAC

I'm using the default ASP.NET MVC template with ASP.NET Identity and have updated my Register action as below:

public async Task<ActionResult> Register(RegisterViewModel model)
{
    if (ModelState.IsValid)
    {
        var user = new ApplicationUser { UserName = model.Email, Email = model.Email };
        var result = await UserManager.CreateAsync(user, model.Password);
        if (result.Succeeded)
        {
            var otac = user.GenerateOTAC(TimeSpan.FromMinutes(1));
            UserManager.Update(user);

            // Redirect to dashboard providing OTAC
            return Redirect("http://localhost:49902/auth/login?otac=" + Url.Encode(otac));
        }

        AddErrors(result);
    }

    // If we got this far, something failed, redisplay form
    return View(model);
}

Here we create the new user and set the OTAC. The OTAC generation is handled directly inside my user class:

public class ApplicationUser : IdentityUser
{
    public string OTAC { get; set; }
    public DateTime? OTACExpires { get; set; }

    public string GenerateOTAC(TimeSpan validFor)
    {
        var otac = CryptoRandom.CreateUniqueId();
        var hashed = Crypto.Hash(otac);
        OTAC = hashed;
        OTACExpires = DateTime.UtcNow.Add(validFor);

        return otac;
    }

    // ... omitted for brevity
}

This makes use of some of the helpers from the IdentityModel package to generate a unique identifier and hash the value before it is stored. The unhashed value is returned to our controller and passed in the URL when redirecting to the dashboard.

Sending the OTAC to Identity Server

Identity Server provides the acr_values parameter to provide additional authentication information to the user service. We'll use this to send our OTAC.

After registration the user is redirected to the Dashboard login page. Here we check to see if an OTAC is provided and, if so, add it to the OWIN context. It will later be retrieved before the authentication request is sent to Identity Server:

public void LogIn(string otac = null)
{
    var ctx = HttpContext.GetOwinContext();

    if (!string.IsNullOrEmpty(otac))
    {
        ctx.Set("otac", otac);
    }

    var properties = new AuthenticationProperties
    {
        RedirectUri = Url.Action("index", "home", null, Request.Url.Scheme)
    };

    ctx.Authentication.Challenge(properties);
}

To set the acr_values parameter we need to hook into the RedirectToIdentityProvider notification hook provided by the Open ID Connect middleware. In startup.cs:

app.UseOpenIdConnectAuthentication(new OpenIdConnectAuthenticationOptions
{
    Authority = "http://localhost:49788/",
    ClientId = "dashboard",
    RedirectUri = "http://localhost:49902/",
    ResponseType = "id_token token",
    Scope = "openid profile email api.read api.write",
    SignInAsAuthenticationType = "Cookies",
    PostLogoutRedirectUri = "http://localhost:49902/",

    Notifications = new OpenIdConnectAuthenticationNotifications
    {
        RedirectToIdentityProvider = n =>
        {
            if (n.ProtocolMessage.RequestType == OpenIdConnectRequestType.AuthenticationRequest)
            {
                var otac = n.OwinContext.Get<string>("otac");
                if (otac != null)
                {
                    n.ProtocolMessage.AcrValues = otac;
                }
            }

            return Task.FromResult(0);
        }
    }
});

RedirectToIdentityProvider is invoked just before we redirect to Identity Server. This is where we are able to customise the request. In the above code we retrieve the OTAC from the Owin Context and set the AcrValues property.

Validating the token and signing the user in

The next step involves customising the default authentication behaviour of Identity Server. Normal authentication requests should work as before, but in the case of post-registration requests, we need to jump in before the default authentication behaviour is executed.

Identity Server defines the IUserService interface to abstract the underlying identity management system being used for users. Rather than implementing this from scratch, and since we're using ASP.NET Identity, we can instead create a class that derives from AspNetIdentityUserService<TUser, TKey>.

To change the default login behaviour we need to override PreAuthenticateAsync:

This method is called before the login page is shown. This allows the user service to determine if the user is already authenticated by some out of band mechanism (e.g. client certificates or trusted headers) and prevent the login page from being shown.

Here is my complete implementation:

public class UserService : AspNetIdentityUserService<ApplicationUser, string>
{
    public UserService(UserManager userManager) : base(userManager)
    {
    }

    public override async Task PreAuthenticateAsync(PreAuthenticationContext context)
    {
        var otac = context.SignInMessage.AcrValues.FirstOrDefault();
        if (otac != null && context.SignInMessage.ClientId == "dashboard")
        {
            var hashed = Crypto.Hash(otac);
            var user = FindUserByOTAC(hashed);

            if (user != null && user.ValidateOTAC(hashed))
            {
                var claims = await GetClaimsFromAccount(user);
                context.AuthenticateResult = new AuthenticateResult(user.Id, user.UserName, claims: claims, authenticationMethod: "oidc");

                // Revoke token
                user.RevokeOTAC();
                await userManager.UpdateAsync(user);

                return;
            }
        }


        await base.PreAuthenticateAsync(context);
    }

    protected async override Task<IEnumerable<Claim>> GetClaimsFromAccount(ApplicationUser user)
    {
        var claims = (await base.GetClaimsFromAccount(user)).ToList();

        if (!string.IsNullOrWhiteSpace(user.UserName))
        {
            claims.Add(new System.Security.Claims.Claim("name", user.UserName));
        }

        return claims;
    }

    private ApplicationUser FindUserByOTAC(string otac)
    {
        return userManager.Users.FirstOrDefault(u => u.OTAC.Equals(otac));
    }
}

In the PreAuthenticateAsync method we check to see if an OTAC is provided and whether the request came from our dashboard. We then attempt to load the user with the provided OTAC and if the code is valid, revoke it, set the AuthenticateResult and short-circuit the request.

OTAC validation and revocation is handled by our User class:

public bool ValidateOTAC(string otac)
{
    if (string.IsNullOrEmpty(otac) || string.IsNullOrEmpty(OTAC))
    {
        return false;
    }

    return OTAC.Equals(otac)
        && OTACExpires != null
        && OTACExpires > DateTime.UtcNow;
}

public void RevokeOTAC()
{
    OTAC = null;
    OTACExpires = null;
}

In order for Identity Server to use our custom user service we need to register it with the service factory. In startup.cs:

var factory = new IdentityServerServiceFactory()
    .UseInMemoryClients(Clients.Get())
    .UseInMemoryScopes(Scopes.Get());

// Wire up ASP.NET Identity 
factory.Register(new Registration<UserManager>());
factory.Register(new Registration<UserStore>());
factory.Register(new Registration<ApplicationDbContext>());

// Custom User Service
factory.UserService = new Registration<IUserService, UserService>();

The user is transparently signed-in and redirected back to the dashboard.

Demo

To prove that everything is working as described, here's a short demo I recorded. It demonstrates the normal login flow to the dashboard, registration with consent screen disabled and registration with consent screen enabled (just so the flow is more obvious).

Thanks

Special thanks to Dominick Baier, who helped significantly with the above implementation. Sorry it took so long for the blog post!


Andrew Lock: POST-REDIRECT-GET using TempData in ASP.NET Core

POST-REDIRECT-GET using TempData in ASP.NET Core

In this post I will show how you can use Session state and TempData to implement the POST-REDIRECT-GET (PRG) design pattern in your ASP.NET Core application.

Disclaimer - The technique shown here, while working very well in the previous version of ASP.NET, is not as simple in ASP.NET Core. This is due to the fact that the TempData object is a wrapper around Session which is itself a wrapper around the IDistributedCache interface. This interface requires you to serialise your objects to and from a byte array before storage, where previously serialisation was not necessary. Consequently there are some trade-offs required in this implementation, so be sure you understand the implications.
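
Because TempData sits on top of Session, which in turn sits on IDistributedCache, both need to be registered before any of the code below will work. A minimal sketch for the in-memory case (assuming the Microsoft.AspNetCore.Session package is referenced):

public void ConfigureServices(IServiceCollection services)
{
    services.AddDistributedMemoryCache(); // in-memory IDistributedCache backing the session
    services.AddSession();
    services.AddMvc();
}

public void Configure(IApplicationBuilder app)
{
    app.UseSession(); // must come before UseMvc so TempData can read and write the session
    app.UseMvcWithDefaultRoute();
}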

What is PRG?

The POST-REDIRECT-GET (PRG) design pattern states that a POST should be answered with a REDIRECT response, to which the user's browser will follow with a GET request. It is designed to reduce the number of duplicate form submissions caused by users refreshing their browser and navigating back and forth.

No doubt in your general internet travels you will have refreshed a page and seen a popup similar to the following:

POST-REDIRECT-GET using TempData in ASP.NET Core

This occurs when the response returned from a POST is just content, with no REDIRECT. When you click reload, the browser attempts to resend the last request, which in this case was a POST. In some cases this may be the desired behaviour, but in my experience it invariably is not!

Luckily, as suggested, handling this case is simple when the form data submitted in the post is valid and can be handled correctly. Simply return a redirect response from your controller actions to a new page. So for example, consider we have a simple form on our home page which can POST an EditModel. If the form is valid, then we redirect to the Success action, instead of returning a View result directly. That way if the user reloads the screen, they replay the GET request to Success instead of the POST to Index.

public class HomeController : Controller  
{
    public IActionResult Index()
    {
        return View(new EditModel());
    }

    [HttpPost]
    public IActionResult Index(EditModel model)
    {
        if (!ModelState.IsValid)
        {
            return View(model);
        }
        return RedirectToAction("Success");
    }

    public IActionResult Success()
    {
        return View();
    }
}

Handling invalid forms

Unfortunately the waters get a little more muddy when the form data you have submitted is not valid. As PRG is primarily intended to prevent double form submissions, it does not necessarily follow that you should REDIRECT a user if the form is invalid. In that case, the request should not be modifying state, and so it is valid to submit the form again.

In MVC, this has generally been the standard way of handling invalid forms. In the example above, we check the ModelState.IsValid property in our POST handler, and if not valid, we simply redisplay the form, using the current ModelState to populate the validation helpers etc. This is a conceptually simple solution, that still allows us to use PRG when the post is successful.

Unfortunately, this approach has some drawbacks. It is still quite possible for users to be hit with the (for some, no-doubt confusing) 'Confirm form resubmission' popup.

Consider the controller above. A user can submit the form, where if invalid we return the validation errors on the page. The user then reloads the page and is shown the 'Confirm form resubmission' popup:

POST-REDIRECT-GET using TempData in ASP.NET Core

It is likely the user expected reloading the page to actually reload the page and clear the previously entered values, rather than resubmitting the form. Luckily, we can use PRG to produce that behaviour and to provide a cleaner user experience.

Using TempData to save ModelState

The simple answer may seem to be changing the View(model) statement to be RedirectToAction("Index") - that would satisfy the PRG requirement and prevent form resubmissions. However doing that would cause a 'fresh' GET on the Index page, so we would lose the previously entered input fields and all of the validation errors - not a nice user experience at all!

In order to display the validation messages and input values we need to somehow preserve the ModelStateDictionary exposed as ModelState in the controller. In ASP.NET 4.X, that is relatively easy to do using the TempData structure, which stores data in the Session for the current request and the next one, after which it is deleted.
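
To make that lifetime concrete, here is a tiny sketch (hypothetical action names and key): a value written to TempData survives exactly one subsequent request, which is what makes it a good fit for PRG:

[HttpPost]
public IActionResult Save()
{
    TempData["StatusMessage"] = "Saved!";    // stored for this request and the next one
    return RedirectToAction("Index");
}

public IActionResult Index()
{
    var message = TempData["StatusMessage"]; // available on the redirected GET, then removed
    return View();
}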

Matthew Jones has an excellent post on using TempData to store and rehydrate the ModelState when doing PRG in ASP.NET 4.X, which was the inspiration for this post. Unfortunately there are some limitations in ASP.NET Core which make the application of his example slightly less powerful, but hopefully still sufficient in the majority of cases.

Serialising ModelState to TempData

The biggest problem here is that ModelState is not generally serialisable. As discussed in this GitHub issue, ModelState can contain Exceptions which themselves may not be serialisable. This was not an issue in ASP.NET 4.X as TempData would just store the ModelState object itself, rather than having to serialise at all.

To get around this, we have to extract the details we care about from the ModelStateDictionary, serialise those details, and then rebuild the ModelStateDictionary from the serialised representation on the next request.

To do this, we can create a simple serialisable transport class, which contains only the details we need to redisplay the form inputs correctly:

public class ModelStateTransferValue  
{
    public string Key { get; set; }
    public string AttemptedValue { get; set; }
    public object RawValue { get; set; }
    public ICollection<string> ErrorMessages { get; set; } = new List<string>();
}

All we store is the Key (the field name), the RawValue and AttemptedValue (the field values), and the ErrorMessages associated with the field. These map directly to the equivalent fields in ModelStateDictionary.

Note that the RawValue type is an object, which again leaves us with the problem that ModelStateTransferValue may not be serialisable. I haven't come across any times where this is the case but it is something to be aware of if you are using some complex objects in your view models.

We then create a helper class to allow us to serialise the ModelStateDictionary to and from TempData. When serialising, we first convert it to a collection of ModelStateTransferValue and then serialise these to a string. On deserialisation, we simply perform the process in reverse:

public static class ModelStateHelpers  
{
    public static string SerialiseModelState(ModelStateDictionary modelState)
    {
        var errorList = modelState
            .Select(kvp => new ModelStateTransferValue
            {
                Key = kvp.Key,
                AttemptedValue = kvp.Value.AttemptedValue,
                RawValue = kvp.Value.RawValue,
                ErrorMessages = kvp.Value.Errors.Select(err => err.ErrorMessage).ToList(),
            });

        return JsonConvert.SerializeObject(errorList);
    }

    public static ModelStateDictionary DeserialiseModelState(string serialisedErrorList)
    {
        var errorList = JsonConvert.DeserializeObject<List<ModelStateTransferValue>>(serialisedErrorList);
        var modelState = new ModelStateDictionary();

        foreach (var item in errorList)
        {
            modelState.SetModelValue(item.Key, item.RawValue, item.AttemptedValue);
            foreach (var error in item.ErrorMessages)
            {
                modelState.AddModelError(item.Key, error);
            }
        }
        return modelState;
    }
}

ActionFilters for exporting and importing

With these helpers in place, we can now create the ActionFilters where we will store and rehydrate the model data. These filters are almost identical to the ones proposed by Matthew Jones in his post, just updated to ASP.NET Core constructs, and calling our ModelStateHelpers as required:

public abstract class ModelStateTransfer : ActionFilterAttribute  
{
    protected const string Key = nameof(ModelStateTransfer);
}

public class ExportModelStateAttribute : ModelStateTransfer  
{
    public override void OnActionExecuted(ActionExecutedContext filterContext)
    {
        //Only export when ModelState is not valid
        if (!filterContext.ModelState.IsValid)
        {
            //Export if we are redirecting
            if (filterContext.Result is RedirectResult 
                || filterContext.Result is RedirectToRouteResult 
                || filterContext.Result is RedirectToActionResult)
            {
                var controller = filterContext.Controller as Controller;
                if (controller != null && filterContext.ModelState != null)
                {
                    var modelState = ModelStateHelpers.SerialiseModelState(filterContext.ModelState);
                    controller.TempData[Key] = modelState;
                }
            }
        }

        base.OnActionExecuted(filterContext);
    }
}

public class ImportModelStateAttribute : ModelStateTransfer  
{
    public override void OnActionExecuted(ActionExecutedContext filterContext)
    {
        var controller = filterContext.Controller as Controller;
        var serialisedModelState = controller?.TempData[Key] as string;

        if (serialisedModelState != null)
        {
            //Only Import if we are viewing
            if (filterContext.Result is ViewResult)
            {
                var modelState = ModelStateHelpers.DeserialiseModelState(serialisedModelState);
                filterContext.ModelState.Merge(modelState);
            }
            else
            {
                //Otherwise remove it.
                controller.TempData.Remove(Key);
            }
        }

        base.OnActionExecuted(filterContext);
    }
}

The ExportModelStateAttribute runs after an Action has executed, checks whether the ModelState was invalid and if the returned result was a redirect result. If it was, then it serialises the ModelState and stores it in TempData.

The ImportModelStateAttribute also runs after an action has executed, and checks that we have a serialised model state and that we are about to execute a ViewResult. If so, it deserialises the state into a ModelStateDictionary and merges it into the existing ModelState.

We can simply apply these attributes to our HomeController to give PRG on invalid forms, if we also update the !ModelState.IsValid case to redirect to Index:

public class HomeController : Controller  
{
    [ImportModelState]
    public IActionResult Index()
    {
        return View(new EditModel());
    }

    [HttpPost]
    [ExportModelState]
    public IActionResult Index(EditModel model)
    {
        if (!ModelState.IsValid)
        {
            return RedirectToAction("Index");
        }
        return RedirectToAction("Success");
    }
}
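
One thing to be aware of: TempData in ASP.NET Core 1.x is backed by session state by default, so the session services and middleware need to be registered for these filters to work. A minimal sketch of the relevant Startup pieces, assuming the default session-based TempData provider (adjust to your existing Startup):

public void ConfigureServices(IServiceCollection services)
{
    // Session (which backs TempData by default) requires an IDistributedCache
    services.AddDistributedMemoryCache();
    services.AddSession();
    services.AddMvc();
}

public void Configure(IApplicationBuilder app)
{
    // The session middleware must run before MVC so the filters can read and write TempData
    app.UseSession();
    app.UseStaticFiles();
    app.UseMvcWithDefaultRoute();
}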

The result

We're all set to give this a try now. Previously, if we submitted a form with errors, then reloading the page would give us the 'Confirm form resubmission' popup. This was because the POST was being resent to the server, as we can see by viewing the Network tab in Chrome:

POST-REDIRECT-GET using TempData in ASP.NET Core

See those POSTs returning a 200? That's what we're trying to avoid. With our new approach, errors in the form cause a redirect to the Index page, followed by a GET request from the browser. The form fields and validation errors are all still visible, even though this is a normal GET request.

POST-REDIRECT-GET using TempData in ASP.NET Core

Our POST now returns a 302, which is followed by a GET. Now if the user refreshes the page, the page will actually refresh, clearing all the input values and validation errors and giving you a nice clean form, with no confusing popups!

POST-REDIRECT-GET using TempData in ASP.NET Core

Summary

This post shows how you can implement PRG for all your POSTs in ASP.NET Core. Whether you actually want to have this behaviour is another question which is really up to you. It allows you to avoid the annoying popups, but on the other hand it is not (and likely will not be) a pattern that is directly supported by the ASP.NET Core framework itself. The ModelState serialisation requirement is a tricky problem which may cause issues for you in some cases, so use it with caution!

To be clear, you absolutely should be using the PRG pattern for successful POSTs, and this approach is completely supported - just return a RedirectResult from your Action method. The choice of whether to use PRG for invalid POSTs is down to you.


Taiseer Joudeh: Integrate Azure AD B2C with ASP.NET MVC Web App – Part 3

This is the third part of the tutorial, which covers using an Azure AD B2C tenant with ASP.NET Web API 2 and various front-end clients.

The source code for this tutorial is available on GitHub.

The MVC Web App has been published on Azure App Services, so feel free to try it out using the Base URL (https://aadb2cmvcapp.azurewebsites.net/)

I promise you that I won’t share your information with anyone, feel free to try the experience 🙂

Integrate Azure AD B2C with ASP.NET MVC Web App

In the previous post, we configured our Web API to rely on our Azure AD B2C IdP for security, so that only calls containing a token issued by our IdP are accepted by the Web API.

In this post we will build our first front-end application (an ASP.NET MVC 5 Web App) which will consume the API endpoints by sending a valid token obtained from the Azure AD B2C tenant, and which will also allow anonymous users to create profiles and sign in against the Azure AD B2C tenant. The MVC Web App itself will be protected by the same Azure AD B2C tenant, as we share the same tenant Id between the Web API and the MVC Web App.

So let’s start building the MVC Web App.

Step 1: Creating the MVC Web App Project

Let’s add a new ASP.NET Web Application named “AADB2C.WebClientMvc” to the solution named “WebApiAzureAcitveDirectoryB2C.sln”. The selected template for the project will be “MVC”, and do not forget to change the “Authentication Mode” to “No Authentication”; check the image below:

Azure B2C Web Mvc Template

Once the project has been created, click on its properties and set “SSL Enabled” to “True”, then copy the “SSL URL” value. Right click on the project, select “Properties”, select the “Web” tab on the left, paste the “SSL URL” value into the “Project Url” text field and click “Save”. We need to allow the https scheme locally when we debug the application. Check the image below:

MvcWebSSLEnable

Step 2: Install the needed NuGet Packages to Configure the MVC App

We need to add a bunch of NuGet packages, so open the NuGet Package Manager Console and install the packages below:

Install-Package Microsoft.Owin.Security.OpenIdConnect -Version 3.0.1
Install-Package Microsoft.Owin.Security.Cookies -Version 3.0.1
Install-Package Microsoft.Owin.Host.SystemWeb -Version 3.0.1
Update-package Microsoft.IdentityModel.Protocol.Extensions

The package “Microsoft.Owin.Security.OpenIdConnect” contains the middleware used to protect web apps with OpenID Connect; it contains the logic for the heavy lifting that happens when our MVC App talks to the Azure AD B2C tenant to request tokens and validate them.

The package “Microsoft.IdentityModel.Protocol.Extensions” contains classes which represent OpenID Connect constants and messages. Lastly, the package “Microsoft.Owin.Security.Cookies” will be used to create a cookie based session after obtaining a valid token from our Azure AD B2C tenant. This cookie will be sent from the browser to the server with each subsequent request and gets validated by the cookie middleware.

Step 3: Configure Web App to use Azure AD B2C tenant IDs and Policies

Now we need to modify the web.config of our MVC App by adding the keys below, so open Web.config and add the following AppSettings keys:

<add key="ida:Tenant" value="BitofTechDemo.onmicrosoft.com" />
<add key="ida:ClientId" value="bc348057-3c44-42fc-b4df-7ef14b926b78" />
<add key="ida:AadInstance" value="https://login.microsoftonline.com/{0}/v2.0/.well-known/openid-configuration?p={1}" />
<add key="ida:SignUpPolicyId" value="B2C_1_Signup" />
<add key="ida:SignInPolicyId" value="B2C_1_Signin" />
<add key="ida:UserProfilePolicyId" value="B2C_1_Editprofile" />
<add key="ida:RedirectUri" value="https://localhost:44315/" />
<add key="api:OrdersApiUrl" value="https://localhost:44339/" />

The usage of each setting was outlined in the previous post; the only 2 new settings keys are: “ida:RedirectUri”, which will be used to set the OpenID Connect “redirect_uri” property. The value of this URI should be registered in the Azure AD B2C tenant (we will do this next); this redirect URI will be used by the OpenID Connect middleware to return token responses or failures after the authentication process, as well as after the sign out process. The second setting key, “api:OrdersApiUrl”, will be used as the base URI for our Web API.

Now let’s register the new redirect URI in the Azure AD B2C tenant. To do so, log in to the Azure Portal, navigate to the app “Bit of Tech Demo App” we already registered in the previous post, then add the value “https://localhost:44315/” to the Reply URL settings as in the image below. Note that I have already published the MVC Web App to Azure App Services at the URL (https://aadb2cmvcapp.azurewebsites.net/), so I’ve included this URL too.

B2C Mvc Reply URL

Step 4: Add Owin “Startup” Class

The default MVC template comes without a “Startup” class, but we need to configure our OWIN OpenID Connect middleware at the start of our Web App, so add a new class named “Startup” and paste the code below. There is a lot of code here, so jump to the next paragraph where I will do my best to explain what we have included in this class.

public class Startup
    {
        // App config settings
        private static string clientId = ConfigurationManager.AppSettings["ida:ClientId"];
        private static string aadInstance = ConfigurationManager.AppSettings["ida:AadInstance"];
        private static string tenant = ConfigurationManager.AppSettings["ida:Tenant"];
        private static string redirectUri = ConfigurationManager.AppSettings["ida:RedirectUri"];

        // B2C policy identifiers
        public static string SignUpPolicyId = ConfigurationManager.AppSettings["ida:SignUpPolicyId"];
        public static string SignInPolicyId = ConfigurationManager.AppSettings["ida:SignInPolicyId"];
        public static string ProfilePolicyId = ConfigurationManager.AppSettings["ida:UserProfilePolicyId"];

        public void Configuration(IAppBuilder app)
        {
            ConfigureAuth(app);
        }

        public void ConfigureAuth(IAppBuilder app)
        {
            app.SetDefaultSignInAsAuthenticationType(CookieAuthenticationDefaults.AuthenticationType);

            app.UseCookieAuthentication(new CookieAuthenticationOptions() );

            // Configure OpenID Connect middleware for each policy
            app.UseOpenIdConnectAuthentication(CreateOptionsFromPolicy(SignUpPolicyId));
            app.UseOpenIdConnectAuthentication(CreateOptionsFromPolicy(ProfilePolicyId));
            app.UseOpenIdConnectAuthentication(CreateOptionsFromPolicy(SignInPolicyId));
        }

        // Used for avoiding yellow-screen-of-death
        private Task AuthenticationFailed(AuthenticationFailedNotification<OpenIdConnectMessage, OpenIdConnectAuthenticationOptions> notification)
        {
            notification.HandleResponse();
            if (notification.Exception.Message == "access_denied")
            {
                notification.Response.Redirect("/");
            }
            else
            {
                notification.Response.Redirect("/Home/Error?message=" + notification.Exception.Message);
            }

            return Task.FromResult(0);
        }

        private OpenIdConnectAuthenticationOptions CreateOptionsFromPolicy(string policy)
        {
            return new OpenIdConnectAuthenticationOptions
            {
                // For each policy, give OWIN the policy-specific metadata address, and
                // set the authentication type to the id of the policy
                MetadataAddress = String.Format(aadInstance, tenant, policy),
                AuthenticationType = policy,
              
                // These are standard OpenID Connect parameters, with values pulled from web.config
                ClientId = clientId,
                RedirectUri = redirectUri,
                PostLogoutRedirectUri = redirectUri,
                Notifications = new OpenIdConnectAuthenticationNotifications
                {
                    AuthenticationFailed = AuthenticationFailed
                },
                Scope = "openid",
                ResponseType = "id_token",

                // This piece is optional - it is used for displaying the user's name in the navigation bar.
                TokenValidationParameters = new TokenValidationParameters
                {
                    NameClaimType = "name",
                    SaveSigninToken = true // important to save the token in the BootstrapContext
                }
            };
        }
    }

What we have implemented here is the following:

  • From lines 4-12 we read the app settings for the keys we added to the MVC App web.config; they represent the Azure AD B2C tenant and policy names. Note that the policy name fields have public access modifiers as they will be referenced in another class.
  • Inside the method “ConfigureAuth” we do the following:
    • The line app.SetDefaultSignInAsAuthenticationType(CookieAuthenticationDefaults.AuthenticationType) configures the OWIN security pipeline and informs the OpenID Connect middleware that the default authentication type we will use is “Cookies”. This means that the “Claims” encoded in the token we receive from the Azure AD B2C tenant will be stored in a cookie (the session for the authenticated user).
    • The line app.UseCookieAuthentication(new CookieAuthenticationOptions()) registers a cookie authentication middleware instance with default options, so the authentication type here is the same “Cookies” type we set in the previous step.
    • The app.UseOpenIdConnectAuthentication calls configure the OWIN security pipeline to use the authentication provider (Azure AD B2C) per policy; in our case there are 3 different policies we already defined.
  • The method CreateOptionsFromPolicy takes the policy name as an input parameter and returns an object of type “OpenIdConnectAuthenticationOptions”, which is responsible for controlling the OpenID Connect middleware. The properties used to configure this instance are as below:
    • The MetadataAddress property accepts the address of the discovery document endpoint of our Azure AD B2C tenant per policy; for example, the discovery endpoint for the policy “B2C_1_Signup” will be “https://login.microsoftonline.com/BitofTechDemo.onmicrosoft.com/v2.0/.well-known/openid-configuration?p=B2C_1_Signup”. This discovery document is used to get information from Azure AD B2C on how to generate authentication requests and validate incoming token responses.
    • The AuthenticationType property informs the middleware that the authentication operation used is one of the policies we already defined; for example, if you defined a fourth policy and didn’t register it with the OpenID Connect middleware, tokens issued by that policy will be rejected.
    • The ClientId property tells Azure AD B2C which ID to use to match the requests originating from the Web App. This represents the Azure AD B2C App we registered in the previous posts.
    • The RedirectUri property informs Azure AD B2C where your app wants the requested token response to be returned to; the value of this URI should already be registered in the “ReplyURLs” values of the Azure AD B2C App we defined earlier.
    • The PostLogoutRedirectUri property informs Azure AD B2C where to redirect the browser after a sign out operation completes successfully.
    • The Scope property is used to inform our Azure AD B2C tenant that our web app needs to use the “OpenID Connect” protocol for authentication.
    • The ResponseType property indicates what our Web App needs from the Azure AD B2C tenant after the authentication process; in our case, we only need an id_token.
    • The TokenValidationParameters property stores the information needed to validate the tokens; we only need to change 2 settings here, NameClaimType and SaveSigninToken. Setting the “NameClaimType” value to “name” allows us to read the display name of the user by calling User.Identity.Name, and setting “SaveSigninToken” to “true” saves the token we received from the authentication process in the claims created (inside the session cookie), which is useful when we want to retrieve the token from the claims to call the Web API. Keep in mind that the cookie size will get larger as we are storing the token inside it.
    • Lastly, the Notifications property allows us to inject custom code during certain phases of the authentication process. The phase we are interested in here is AuthenticationFailed: in this phase we redirect the user to the root of the Web App if he/she clicked cancel on the sign up or sign in forms, and we redirect to the error view if we received any other exception during the authentication process.

This was the most complicated part of configuring our Web App to use our Azure AD B2C tenant. The next steps should be simpler: we will modify some views and add some new actions to issue requests to our Web API and call the Azure AD B2C policies.

Step 5: Call the Azure B2C Policies

Now we need to configure our Web App to invoke the policies we created. To do so, add a new controller named “AccountController” and paste the code below:

public class AccountController : Controller
    {
        public void SignIn()
        {
            if (!Request.IsAuthenticated)
            {
                // To execute a policy, you simply need to trigger an OWIN challenge.
                // You can indicate which policy to use by specifying the policy id as the AuthenticationType
                HttpContext.GetOwinContext().Authentication.Challenge(
                    new AuthenticationProperties() { RedirectUri = "/" }, Startup.SignInPolicyId);
            }
        }

        public void SignUp()
        {
            if (!Request.IsAuthenticated)
            {
                HttpContext.GetOwinContext().Authentication.Challenge(
                    new AuthenticationProperties() { RedirectUri = "/" }, Startup.SignUpPolicyId);
            }
        }

        public void Profile()
        {
            if (Request.IsAuthenticated)
            {
                HttpContext.GetOwinContext().Authentication.Challenge(
                    new AuthenticationProperties() { RedirectUri = "/" }, Startup.ProfilePolicyId);
            }
        }

        public void SignOut()
        {
            // To sign out the user, you should issue an OpenIDConnect sign out request
            if (Request.IsAuthenticated)
            {
                IEnumerable<AuthenticationDescription> authTypes = HttpContext.GetOwinContext().Authentication.GetAuthenticationTypes();
                HttpContext.GetOwinContext().Authentication.SignOut(authTypes.Select(t => t.AuthenticationType).ToArray());
            }
        }
    }

What we have implemented here is simple, and it is the same for the SignIn, SignUp, and Profile actions: each one calls the Challenge method and specifies the related policy name for that action.

The “Challenge” method in the OWIN pipeline accepts an instance of AuthenticationProperties, which is used to set the settings of the action we want to do (sign in, sign up, edit profile). We only set the “RedirectUri” here to the root path of our Web App, taking into consideration that this “RedirectUri” has nothing to do with the “RedirectUri” we have defined in Azure AD B2C. This can be a different URI where you want the browser to redirect the user only after a successful operation takes place.
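
For example, a sketch of redirecting the user to a different local page (the “/Orders” path here is just an illustrative value) after a successful sign in, while the OpenID Connect redirect_uri registered in Azure AD B2C stays unchanged:

// Local post-sign-in redirect; unrelated to the redirect_uri registered in Azure AD B2C
HttpContext.GetOwinContext().Authentication.Challenge(
    new AuthenticationProperties { RedirectUri = "/Orders" }, Startup.SignInPolicyId);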

Regarding the SignOut action, we need to sign the user out in two places: once by removing the app's local session we created using the “Cookies” authentication, and again by informing the OpenID Connect middleware to send a sign out request to our Azure AD B2C tenant so the user is signed out there too. That is why we retrieve all the authentication types available for our Web App and then pass them to the “SignOut” method.
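
For this app, the loop is equivalent to listing the authentication types explicitly. A sketch of the expanded call, naming the local cookie type and the three policies we registered:

// Sign out of the local cookie session and of each registered Azure AD B2C policy
HttpContext.GetOwinContext().Authentication.SignOut(
    CookieAuthenticationDefaults.AuthenticationType,
    Startup.SignInPolicyId,
    Startup.SignUpPolicyId,
    Startup.ProfilePolicyId);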

Now let’s add a partial view which renders the links to call those actions, so add a new partial view named “_LoginPartial.cshtml” under the “Shared” folder and paste the code below:

@if (Request.IsAuthenticated)
{
    <text>
        <ul class="nav navbar-nav navbar-right">
            <li>
                <a id="profile-link">@User.Identity.Name</a>
                <div id="profile-options" class="nav navbar-nav navbar-right">
                    <ul class="profile-links">
                        <li class="profile-link">
                            @Html.ActionLink("Edit Profile", "Profile", "Account")
                        </li>
                    </ul>
                </div>
            </li>
            <li>
                @Html.ActionLink("Sign out", "SignOut", "Account")
            </li>
        </ul>
    </text>
}
else
{
    <ul class="nav navbar-nav navbar-right">
        <li>@Html.ActionLink("Sign up", "SignUp", "Account", routeValues: null, htmlAttributes: new { id = "signUpLink" })</li>
        <li>@Html.ActionLink("Sign in", "SignIn", "Account", routeValues: null, htmlAttributes: new { id = "loginLink" })</li>
    </ul>
}

Notice that this part of the partial view is rendered only if the user is authenticated, and notice how we display the user's display name from the claim named “name” simply by calling @User.Identity.Name.

Now we need to reference this partial view in the “_Layout.cshtml” view; just replace the last div in the body section with the section below:

<div class="navbar-collapse collapse">
	<ul class="nav navbar-nav">
		<li>@Html.ActionLink("Home", "Index", "Home")</li>
		<li>@Html.ActionLink("Orders List", "Index", "Orders")</li>
	</ul>
	@Html.Partial("_LoginPartial")
</div>

Step 6: Call the Web API from the MVC App

Now we want to add actions that invoke the protected API we’ve created, passing the token obtained from the Azure AD B2C tenant in the “Authorization” header of each protected request. We will add support for creating a new order and listing all the orders related to the authenticated user. If you recall from the previous post, we depend on the claim named “objectidentifier” to read the User ID value encoded in the token as a claim.
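
As a quick reminder of the Web API side, a minimal sketch of reading that claim from the validated token (the claim type URI below is the standard mapped name for the object identifier; use the short claim name if that is what your API configured in the previous post):

// Inside the protected Web API: the object identifier claim carries the
// Azure AD B2C user id that was encoded in the access token
var userId = ClaimsPrincipal.Current
    .FindFirst("http://schemas.microsoft.com/identity/claims/objectidentifier")?.Value;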

To do so we will add a new controller named “OrdersController” under the “Controllers” folder and add 2 action methods named “Index” and “Create”. Add the file and paste the code below:

[Authorize]
    public class OrdersController : Controller
    {
        private static string serviceUrl = ConfigurationManager.AppSettings["api:OrdersApiUrl"];

        // GET: Orders
        public async Task<ActionResult> Index()
        {
            try
            {

                var bootstrapContext = ClaimsPrincipal.Current.Identities.First().BootstrapContext as System.IdentityModel.Tokens.BootstrapContext;

                HttpClient client = new HttpClient();

                client.BaseAddress = new Uri(serviceUrl);

                client.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue("Bearer", bootstrapContext.Token);

                HttpResponseMessage response = await client.GetAsync("api/orders");

                if (response.IsSuccessStatusCode)
                {

                    var orders = await response.Content.ReadAsAsync<List<OrderModel>>();

                    return View(orders);
                }
                else
                {
                    // If the call failed with access denied, show the user an error indicating they might need to sign-in again.
                    if (response.StatusCode == System.Net.HttpStatusCode.Unauthorized)
                    {
                        return new RedirectResult("/Error?message=Error: " + response.ReasonPhrase + " You might need to sign in again.");
                    }
                }

                return new RedirectResult("/Error?message=An Error Occurred Reading Orders List: " + response.StatusCode);
            }
            catch (Exception ex)
            {
                return new RedirectResult("/Error?message=An Error Occurred Reading Orders List: " + ex.Message);
            }
        }

        public ActionResult Create()
        {
            return View();
        }

        [HttpPost]
        public async Task<ActionResult> Create([Bind(Include = "ShipperName,ShipperCity")]OrderModel order)
        {

            try
            {
                var bootstrapContext = ClaimsPrincipal.Current.Identities.First().BootstrapContext as System.IdentityModel.Tokens.BootstrapContext;

                HttpClient client = new HttpClient();

                client.BaseAddress = new Uri(serviceUrl);

                client.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue("Bearer", bootstrapContext.Token);

                HttpResponseMessage response = await client.PostAsJsonAsync("api/orders", order);

                if (response.IsSuccessStatusCode)
                {
                    return RedirectToAction("Index");
                }
                else
                {
                    // If the call failed with access denied, show the user an error indicating they might need to sign-in again.
                    if (response.StatusCode == System.Net.HttpStatusCode.Unauthorized)
                    {
                        return new RedirectResult("/Error?message=Error: " + response.ReasonPhrase + " You might need to sign in again.");
                    }
                }

                return new RedirectResult("/Error?message=An Error Occurred Creating Order: " + response.StatusCode);
            }
            catch (Exception ex)
            {
                return new RedirectResult("/Error?message=An Error Occurred Creating Order: " + ex.Message);
            }

        }

    }

    public class OrderModel
    {
        public string OrderID { get; set; }
        [Display(Name = "Shipper")]
        public string ShipperName { get; set; }
        [Display(Name = "Shipper City")]
        public string ShipperCity { get; set; }
        public DateTimeOffset TS { get; set; }
    }

What we have implemented here is the following:

  • We have added an [Authorize] attribute on the controller, so any unauthenticated (anonymous) request (where the session cookie doesn’t exist) to any of the actions in this controller will result in a redirect to the Sign in policy we have configured.
  • Notice how we read the BootstrapContext from the current “ClaimsPrincipal” object; this context contains a property named “Token” which we send in the “Authorization” header to the Web API. Note that if you forgot to set the “SaveSigninToken” property of the “TokenValidationParameters” to “true” then this will return “null”.
  • We are using HttpClient to craft the requests and call the Web API endpoints we defined earlier. There is no need to pay attention to the User ID property in the MVC App, as this property is encoded in the token itself, and the Web API takes the responsibility to decode it and store it in Azure table storage along with the order information.

Step 7: Add views for the Orders Controller

I will not dive into details here; as you know, we need to add 2 views to support rendering the list of orders and creating a new order. For the sake of completeness I will paste the cshtml for each view, so add a new folder named “Orders” under the “Views” folder, then add 2 new views named “Index.cshtml” and “Create.cshtml” and paste the code below:

@model IEnumerable<AADB2C.WebClientMvc.Controllers.OrderModel>
@{
    ViewBag.Title = "Orders";
}
<h2>Orders</h2>
<br />
<p>
    @Html.ActionLink("Create New", "Create")
</p>

<table class="table table-bordered table-striped table-hover table-condensed" style="table-layout: auto">
    <thead>
        <tr>
            <td>Order Id</td>
            <td>Shipper</td>
            <td>Shipper City</td>
            <td>Date</td>
        </tr>
    </thead>
    @foreach (var item in Model)
    {
        <tr>
            <td>
                @Html.DisplayFor(modelItem => item.OrderID)
            </td>
            <td>
                @Html.DisplayFor(modelItem => item.ShipperName)
            </td>
            <td>
                @Html.DisplayFor(modelItem => item.ShipperCity)
            </td>
            <td>
                @Html.DisplayFor(modelItem => item.TS)
            </td>
        </tr>
    }
</table>

@model AADB2C.WebClientMvc.Controllers.OrderModel
@{
    ViewBag.Title = "New Order";
}
<h2>Create Order</h2>
@using (Html.BeginForm())
{
    <div class="form-horizontal">
        <hr />

        <div class="form-group">
            @Html.LabelFor(model => model.ShipperName, htmlAttributes: new { @class = "control-label col-md-2" })
            <div class="col-md-10">
                @Html.EditorFor(model => model.ShipperName, new { htmlAttributes = new { @class = "form-control" } })
            </div>
        </div>

        <div class="form-group">
            @Html.LabelFor(model => model.ShipperCity, htmlAttributes: new { @class = "control-label col-md-2" })
            <div class="col-md-10">
                @Html.EditorFor(model => model.ShipperCity, new { htmlAttributes = new { @class = "form-control" } })
            </div>
        </div>

        <div class="form-group">
            <div class="col-md-offset-2 col-md-10">
                <input type="submit" value="Save Order" class="btn btn-default" />
            </div>
        </div>
    </div>

    <div>
        @Html.ActionLink("Back to Orders", "Index")
    </div>
}

Step 8: Lastly, let’s test out the complete flow

To test this out, the user clicks the “Orders List” link in the top navigation menu and is redirected to the Azure AD B2C tenant where s/he can enter the app-local credentials. If the credentials provided are valid, authentication succeeds, a token is obtained and stored in the claims identity for the authenticated user, and the orders view is displayed: the token is sent in the Authorization header to get all the orders for this user. It should look something like the animated image below:

Azure AD B2C animation

That’s it for now folks, I hope you find it useful 🙂 In the next post, I will cover how to integrate MSAL with Azure AD B2C and use it in a desktop application. If you find the post useful, then do not forget to share it 🙂

The Source code for this tutorial is available on GitHub.

The MVC Web App has been published on Azure App Services, so feel free to try it out using the Base URL (https://aadb2cmvcapp.azurewebsites.net/)

Follow me on Twitter @tjoudeh

Resources



Damien Bowden: Implementing UNDO, REDO in ASP.NET Core

The article shows how to implement UNDO, REDO functionality in an ASP.NET Core application using EFCore and MS SQL Server.

This is the first blog in a 3 part series. The second blog will implement the UI using Angular 2 and the third article will improve the concurrent stacks with max limits to prevent memory leaks etc.

Code: https://github.com/damienbod/Angular2AutoSaveCommands

2016.08.19 ASP.NET Core 1.0.1

Other articles in this series:

  1. Implementing UNDO, REDO in ASP.NET Core
  2. Angular 2 Auto Save, Undo and Redo
  3. ASP.NET Core Action Arguments Validation using an ActionFilter

The application was created using the ASP.NET Core Web API template. The CommandDto class is used for all commands sent from the UI. The class is used for the create, update and delete requests. The class has 4 properties. The CommandType property defines the types of commands which can be sent. The supported CommandType values are defined as constants in the CommandTypes class. The PayloadType is used to define the type for the Payload JObject. The server application can then use this to convert the JObject to a C# object. The ActualClientRoute is required to support the UNDO and REDO logic. Once the REDO or UNDO is executed, the client needs to know where to navigate to. The values are strings and are totally controlled by the client SPA application. The server just persists these for each command.

using Newtonsoft.Json.Linq;

namespace Angular2AutoSaveCommands.Models
{
    public class CommandDto
    {
        public string CommandType { get; set; }
        public string PayloadType { get; set; }
        public JObject Payload { get; set; }
        public string ActualClientRoute { get; set;}
    }
	
    public static  class CommandTypes
    {
        public const string ADD = "ADD";
        public const string UPDATE = "UPDATE";
        public const string DELETE = "DELETE";
        public const string UNDO = "UNDO";
        public const string REDO = "REDO";
    }
	
    public static class PayloadTypes
    {
        public const string Home = "HOME";
        public const string ABOUT = "ABOUT";
        public const string NONE = "NONE";
    }
}

The CommandController is used to provide the Execute, UNDO and REDO support for the UI, or any other client which will use the service. The controller injects the ICommandHandler which implements the logic for the HTTP POST requests.

using Angular2AutoSaveCommands.Models;
using Angular2AutoSaveCommands.Providers;
using Microsoft.AspNetCore.Mvc;
using Newtonsoft.Json.Linq;

namespace Angular2AutoSaveCommands.Controllers
{
    [Route("api/[controller]")]
    public class CommandController : Controller
    {
        private readonly ICommandHandler _commandHandler;
        public CommandController(ICommandHandler commandHandler)
        {
            _commandHandler = commandHandler;
        }

        [ServiceFilter(typeof(ValidateCommandDtoFilter))]
        [HttpPost]
        [Route("Execute")]
        public IActionResult Post([FromBody]CommandDto value)
        {
            _commandHandler.Execute(value);
            return Ok(value);
        }

        [HttpPost]
        [Route("Undo")]
        public IActionResult Undo()
        {
            var commandDto = _commandHandler.Undo();
            return Ok(commandDto);
        }

        [HttpPost]
        [Route("Redo")]
        public IActionResult Redo()
        {
            var commandDto = _commandHandler.Redo();
            return Ok(commandDto);
        }
    }
}

The ICommandHandler has three methods, Execute, Undo and Redo. The Undo and the Redo methods return a CommandDto class. This class contains the actual data and the URL for the client routing.

using Angular2AutoSaveCommands.Models;

namespace Angular2AutoSaveCommands.Providers
{
    public interface ICommandHandler 
    {
        void Execute(CommandDto commandDto);
        CommandDto Undo();
        CommandDto Redo();
    }
}

The CommandHandler class implements the ICommandHandler interface. This class provides the two ConcurrentStack fields for the REDO and the UNDO stack. The stacks are static and so need to be thread safe. The UNDO and the REDO return a CommandDTO which contains the relevant data after the operation which has been executed.

The Execute method dispatches to the appropriate execution logic depending on the payload type. It then creates the appropriate command, adds the command to the database for the history, executes the logic and pushes the command onto the UNDO stack.

The undo method pops a command from the undo stack, calls the Unexecute method, adds the command to the redo stack, and saves everything to the database.

The redo method pops a command from the redo stack, calls the Execute method, adds the command to the undo stack, and saves everything to the database.

using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using Angular2AutoSaveCommands.Models;
using Angular2AutoSaveCommands.Providers.Commands;
using Microsoft.Extensions.Logging;

namespace Angular2AutoSaveCommands.Providers
{
    public class CommandHandler : ICommandHandler
    {
        private readonly ICommandDataAccessProvider _commandDataAccessProvider;
        private readonly DomainModelMsSqlServerContext _context;
        private readonly ILoggerFactory _loggerFactory;
        private readonly ILogger _logger;

        // TODO remove these and use persistent stacks
        private static ConcurrentStack<ICommand> _undocommands = new ConcurrentStack<ICommand>();
        private static ConcurrentStack<ICommand> _redocommands = new ConcurrentStack<ICommand>();

        public CommandHandler(ICommandDataAccessProvider commandDataAccessProvider, DomainModelMsSqlServerContext context, ILoggerFactory loggerFactory)
        {
            _commandDataAccessProvider = commandDataAccessProvider;
            _context = context;
            _loggerFactory = loggerFactory;
            _logger = loggerFactory.CreateLogger("CommandHandler");
        }

        public void Execute(CommandDto commandDto)
        {
            if (commandDto.PayloadType == PayloadTypes.ABOUT)
            {
                ExecuteAboutDataCommand(commandDto);
                return;
            }

            if (commandDto.PayloadType == PayloadTypes.Home)
            {
                ExecuteHomeDataCommand(commandDto);
                return;
            }

            if (commandDto.PayloadType == PayloadTypes.NONE)
            {
                ExecuteNoDataCommand(commandDto);
                return;
            }
        }

        // TODO add return object for UI
        public CommandDto Undo()
        {  
            var commandDto = new CommandDto();
            commandDto.CommandType = CommandTypes.UNDO;
            commandDto.PayloadType = PayloadTypes.NONE;
            commandDto.ActualClientRoute = "NONE";

            if (_undocommands.Count > 0)
            {
                ICommand command;
                if (_undocommands.TryPop(out command))
                {
                    _redocommands.Push(command);
                    command.UnExecute(_context);
                    commandDto.Payload = command.ActualCommandDtoForNewState(CommandTypes.UNDO).Payload;
                    _commandDataAccessProvider.AddCommand(CommandEntity.CreateCommandEntity(commandDto));
                    _commandDataAccessProvider.Save();
                    return command.ActualCommandDtoForNewState(CommandTypes.UNDO);
                }   
            }

            return commandDto;
        }

        // TODO add return object for UI
        public CommandDto Redo()
        {
            var commandDto = new CommandDto();
            commandDto.CommandType = CommandTypes.REDO;
            commandDto.PayloadType = PayloadTypes.NONE;
            commandDto.ActualClientRoute = "NONE";

            if (_redocommands.Count > 0)
            {
                ICommand command;
                if(_redocommands.TryPop(out command))
                { 
                    _undocommands.Push(command);
                    command.Execute(_context);
                    commandDto.Payload = command.ActualCommandDtoForNewState(CommandTypes.REDO).Payload;
                    _commandDataAccessProvider.AddCommand(CommandEntity.CreateCommandEntity(commandDto));
                    _commandDataAccessProvider.Save();
                    return command.ActualCommandDtoForNewState(CommandTypes.REDO);
                }
            }

            return commandDto;
        }

        private void ExecuteHomeDataCommand(CommandDto commandDto)
        {
            if (commandDto.CommandType == CommandTypes.ADD)
            {
                ICommandAdd command = new AddHomeDataCommand(_loggerFactory, commandDto);
                command.Execute(_context);
                _commandDataAccessProvider.AddCommand(CommandEntity.CreateCommandEntity(commandDto));
                _commandDataAccessProvider.Save();
                command.UpdateIdforNewItems();
                _undocommands.Push(command);
            }

            if (commandDto.CommandType == CommandTypes.UPDATE)
            {
                ICommand command = new UpdateHomeDataCommand(_loggerFactory, commandDto);
                command.Execute(_context);
                _commandDataAccessProvider.AddCommand(CommandEntity.CreateCommandEntity(commandDto));
                _commandDataAccessProvider.Save();
                _undocommands.Push(command);
            }

            if (commandDto.CommandType == CommandTypes.DELETE)
            {
                ICommand command = new DeleteHomeDataCommand(_loggerFactory, commandDto);
                command.Execute(_context);
                _commandDataAccessProvider.AddCommand(CommandEntity.CreateCommandEntity(commandDto));
                _commandDataAccessProvider.Save();
                _undocommands.Push(command);
            }
        }

        private void ExecuteAboutDataCommand(CommandDto commandDto)
        {
            if(commandDto.CommandType == CommandTypes.ADD)
            {
                ICommandAdd command = new AddAboutDataCommand(_loggerFactory, commandDto);
                command.Execute(_context);
                _commandDataAccessProvider.AddCommand(CommandEntity.CreateCommandEntity(commandDto));
                _commandDataAccessProvider.Save();
                command.UpdateIdforNewItems();
                _undocommands.Push(command);
            }

            if (commandDto.CommandType == CommandTypes.UPDATE)
            {
                ICommand command = new UpdateAboutDataCommand(_loggerFactory, commandDto);
                command.Execute(_context);
                _commandDataAccessProvider.AddCommand(CommandEntity.CreateCommandEntity(commandDto));
                _commandDataAccessProvider.Save();
                _undocommands.Push(command);
            }

            if (commandDto.CommandType == CommandTypes.DELETE)
            {
                ICommand command = new DeleteAboutDataCommand(_loggerFactory, commandDto);
                command.Execute(_context);
                _commandDataAccessProvider.AddCommand(CommandEntity.CreateCommandEntity(commandDto));
                _commandDataAccessProvider.Save();
                _undocommands.Push(command);
            }
        }

        private void ExecuteNoDataCommand(CommandDto commandDto)
        {
            _commandDataAccessProvider.AddCommand(CommandEntity.CreateCommandEntity(commandDto));
            _commandDataAccessProvider.Save();
        }

    }
}
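
The ICommandDataAccessProvider used by the CommandHandler is not listed in this post; judging from its usage above, it exposes at least the following members (a sketch only, the full EF Core implementation is in the linked repository):

namespace Angular2AutoSaveCommands.Providers
{
    // Inferred from its usage in CommandHandler: AddCommand persists a
    // CommandEntity for the history, Save commits the pending changes
    public interface ICommandDataAccessProvider
    {
        void AddCommand(CommandEntity command);
        void Save();
    }
}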

The ICommand interface contains the public methods required for the commands in this application. The DbContext is passed as a parameter to the Execute and UnExecute methods because the context from the current HTTP request is used, and not the original context from the HTTP request in which the command was first executed.

using Angular2AutoSaveCommands.Models;

namespace Angular2AutoSaveCommands.Providers.Commands
{
    public interface ICommand
    {
        void Execute(DomainModelMsSqlServerContext context);
        void UnExecute(DomainModelMsSqlServerContext context);

        CommandDto ActualCommandDtoForNewState(string commandType);
    }
}

The UpdateAboutDataCommand class implements the ICommand interface. This command supplies the logic to update and also to undo an update in the execute and the unexecute methods. For the undo, the previous state of the entity is saved in the command.

 
using System;
using System.Linq;
using Angular2AutoSaveCommands.Models;
using Microsoft.Extensions.Logging;
using Newtonsoft.Json.Linq;

namespace Angular2AutoSaveCommands.Providers.Commands
{
    public class UpdateAboutDataCommand : ICommand
    {
        private readonly ILogger _logger;
        private readonly CommandDto _commandDto;
        private AboutData _previousAboutData;

        public UpdateAboutDataCommand(ILoggerFactory loggerFactory, CommandDto commandDto)
        {
            _logger = loggerFactory.CreateLogger("UpdateAboutDataCommand");
            _commandDto = commandDto;
        }

        public void Execute(DomainModelMsSqlServerContext context)
        {
            _previousAboutData = new AboutData();

            var aboutData = _commandDto.Payload.ToObject<AboutData>();
            var entity = context.AboutData.First(t => t.Id == aboutData.Id);

            _previousAboutData.Description = entity.Description;
            _previousAboutData.Deleted = entity.Deleted;
            _previousAboutData.Id = entity.Id;

            entity.Description = aboutData.Description;
            entity.Deleted = aboutData.Deleted;
            _logger.LogDebug("Executed");
        }

        public void UnExecute(DomainModelMsSqlServerContext context)
        {
            var aboutData = _commandDto.Payload.ToObject<AboutData>();
            var entity = context.AboutData.First(t => t.Id == aboutData.Id);

            entity.Description = _previousAboutData.Description;
            entity.Deleted = _previousAboutData.Deleted;
            _logger.LogDebug("Unexecuted");
        }

        public CommandDto ActualCommandDtoForNewState(string commandType)
        {
            if (commandType == CommandTypes.UNDO)
            {
                var commandDto = new CommandDto();
                commandDto.ActualClientRoute = _commandDto.ActualClientRoute;
                commandDto.CommandType = _commandDto.CommandType;
                commandDto.PayloadType = _commandDto.PayloadType;
            
                commandDto.Payload = JObject.FromObject(_previousAboutData);
                return commandDto;
            }
            else
            {
                return _commandDto;
            }
        }
    }
}
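
The AboutData entity itself is also not shown in this post; based on the properties the command reads and writes, a minimal sketch looks like the following (the key type is an assumption):

namespace Angular2AutoSaveCommands.Models
{
    // Inferred from UpdateAboutDataCommand: Id, Description and Deleted are
    // the only properties the command touches
    public class AboutData
    {
        public long Id { get; set; }
        public string Description { get; set; }
        public bool Deleted { get; set; }
    }
}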

The Startup class adds the interface/class pairs to the built-in IoC container. The MS SQL Server connection is configured here, using the app settings to read the database connection string. EFCore migrations are used to create the database.

using System;
using System.Linq;
using Microsoft.AspNetCore.Builder;
using Microsoft.AspNetCore.Http;
using Microsoft.AspNetCore.Hosting;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Logging;
using Angular2AutoSaveCommands.Providers;
using Microsoft.EntityFrameworkCore;

namespace Angular2AutoSaveCommands
{
    public class Startup
    {
        public Startup(IHostingEnvironment env)
        {
            var builder = new ConfigurationBuilder()
                .SetBasePath(env.ContentRootPath)
                .AddJsonFile("appsettings.json", optional: true, reloadOnChange: true)
                .AddJsonFile($"appsettings.{env.EnvironmentName}.json", optional: true)
                .AddEnvironmentVariables();
            Configuration = builder.Build();
        }

        public IConfigurationRoot Configuration { get; }

        public void ConfigureServices(IServiceCollection services)
        {
            var sqlConnectionString = Configuration.GetConnectionString("DataAccessMsSqlServerProvider");

            services.AddDbContext<DomainModelMsSqlServerContext>(options =>
                options.UseSqlServer(  sqlConnectionString )
            );

            services.AddMvc();

            services.AddScoped<ICommandDataAccessProvider, CommandDataAccessProvider>();
            services.AddScoped<ICommandHandler, CommandHandler>();
        }

        public void Configure(IApplicationBuilder app, IHostingEnvironment env, ILoggerFactory loggerFactory)
        {
            loggerFactory.AddConsole(Configuration.GetSection("Logging"));
            loggerFactory.AddDebug();

            var angularRoutes = new[] {
                 "/home",
                 "/about"
             };

            app.Use(async (context, next) =>
            {
                if (context.Request.Path.HasValue && null != angularRoutes.FirstOrDefault(
                    (ar) => context.Request.Path.Value.StartsWith(ar, StringComparison.OrdinalIgnoreCase)))
                {
                    context.Request.Path = new PathString("/");
                }

                await next();
            });

            app.UseDefaultFiles();

            app.UseStaticFiles();

            app.UseMvc(routes =>
            {
                routes.MapRoute(
                    name: "default",
                    template: "{controller=Home}/{action=Index}/{id?}");
            });
        }
    }
}
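
For reference, the connection string is read from the ConnectionStrings section of appsettings.json; the value below is only an example for a local SQL Server instance, so adjust it to your environment. The database can then be created from the EFCore migrations, e.g. with dotnet ef migrations add Initial followed by dotnet ef database update.

{
  "ConnectionStrings": {
    "DataAccessMsSqlServerProvider": "Server=(localdb)\\mssqllocaldb;Database=Angular2AutoSaveCommands;Trusted_Connection=True;MultipleActiveResultSets=true"
  }
}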

The application API can be tested using Fiddler. The following HTTP POST requests are sent in this order: Execute(ADD), Execute(UPDATE), Undo, Undo, Redo.

http://localhost:5000/api/command/execute
User-Agent: Fiddler
Host: localhost:5000
Content-Type: application/json

{
  "commandType":"ADD",
  "payloadType":"ABOUT",
  "payload":
   { 
      "Id":0,
      "Description":"add a new about item",
      "Deleted":false
    },
   "actualClientRoute":"https://damienbod.com/add"
}

http://localhost:5000/api/command/execute
User-Agent: Fiddler
Host: localhost:5000
Content-Type: application/json

{
  "commandType":"UPDATE",
  "payloadType":"ABOUT",
  "payload":
   { 
      "Id":10003,
      "Description":"update the existing about item",
      "Deleted":false
    },
   "actualClientRoute":"https://damienbod.com/update"
}

http://localhost:5000/api/command/undo
http://localhost:5000/api/command/undo
http://localhost:5000/api/command/redo

The data is sent in this order and the undo and redo work as required.
undoRedofiddler_01

The data can also be validated in the database using the CommandEntity table.

undoRedosql_02

Links:

http://www.codeproject.com/Articles/33384/Multilevel-Undo-and-Redo-Implementation-in-Cshar



Andrew Lock: An introduction to OAuth 2.0 using Facebook in ASP.NET Core

An introduction to OAuth 2.0 using Facebook in ASP.NET Core

This is the next post in a series on authentication and authorisation in ASP.NET Core. In this post I look in moderate depth at the OAuth 2.0 protocol as it pertains to ASP.NET Core applications, walking through the protocol as seen by the user of your website as well as the application itself. Finally, I show how you can configure your application to use a Facebook social login when you are using ASP.NET Core Identity.

OAuth 2.0

OAuth 2.0 is an open standard for authorisation. It is commonly used as a way for users to login to a particular website (say, catpics.com) using a third party account such as a Facebook or Google account, without having to provide catpics.com the password for their Facebook account.

While it is often used for authentication, being used to log a user in to a site, it is actually an authorisation protocol. We'll discuss the detail of the flow of requests in the next sections, but in essence, you as a user are providing permission for the catpics.com website to access some sort of personal information from the OAuth provider website (Facebook). So catpics.com is able to access your personal Facebook cat pictures, without having full access to your account, and without requiring you to provide your password directly.

There are a number of different ways you can use OAuth 2.0, each of which require different parameters and different user interactions. Which one you should use depends on the nature of the application you are developing, for example:

  • Resource Owner Grant - Requires the user to directly enter their username and password to the application. Useful when you are developing a 1st party application to authenticate with your own servers, e.g. the Facebook mobile app might use a Resource Owner Grant to authenticate with Facebook's servers.
  • Implicit Grant - Authenticating with a server returns an access token to the browser which can then be used to access resources. Useful for Single Page Applications (SPA) where communication cannot be private.
  • Authorisation Code Grant - The typical OAuth grant used by web applications, such as you would use in your ASP.NET apps. This is the flow I will focus on for the rest of the article.

The Authorisation Code Grant

Before explaining the flow fully, we need to clarify some of the terminology. This is where I often see people getting confused with the use of overloaded terms like 'Client'. Unfortunately, these are taken from the official spec, so I will use them here as well, but for the remainder of the article I'll try and use disambiguated names instead.

We will consider an ASP.NET application that finds cats in your Facebook photos by using Facebook's OAuth authorisation.

  • Resource owner (e.g. the user) - This technically doesn't need to be a person as OAuth allows machine-to-machine authorisation, but for our purposes it is the end-user who is using your application.
  • Resource service (e.g. the Facebook API server) - This is the endpoint your ASP.NET application will call to access Facebook photos once it has been given an access token.
  • Client (e.g. your app) - This is the application which is actually making the requests to the Resource service. So in this case it is the ASP.NET application.
  • Authorisation server (e.g. the Facebook authorisation server) - This is the server that allows the user to login to their Facebook account.
  • Browser (e.g. Chrome, Safari) - Not required by OAuth in general, but for our example, the browser is the user-agent that the resource owner/user is using to navigate your ASP.NET application.

The flow

Now we have nailed some of the terminology, we can think about the actual flow of events and data when OAuth 2.0 is in use. The image below gives a detailed overview of the various interactions, from the user first requesting access to a protected resource, to them finally gaining access to it. The flow looks complicated, but the key points to notice are the three calls to Facebook's servers.

An introduction to OAuth 2.0 using Facebook in ASP.NET Core

As we go through the flow, we'll illustrate it from a user's point of view, using the default MVC template with ASP.NET Core Identity, configured to use Facebook as an external authentication mechanism.

Before you can use OAuth in your application, you first need to register your application with the Authorisation server (Facebook). There you will need to provide a REDIRECT_URI and you will be provided a CLIENT_ID and CLIENT_SECRET. The process is different for each Authorisation server so it is best to consult their developer docs for how to go about this. I'll cover how to register your application with Facebook later in this article.

Authorising to obtain an authorisation code

When the user requests a page on your app that requires authorisation, they will be redirected to the login page. Here they can either login using a username and password to create an account directly with the site, or they can choose to login with an external provider - in this case just Facebook.

An introduction to OAuth 2.0 using Facebook in ASP.NET Core

When the user clicks on the Facebook button, the ASP.NET application sends a 302 to the user's browser, with a url similar to the following:

https://www.facebook.com/v2.6/dialog/oauth?client_id=CLIENT_ID&scope=public_profile,email&response_type=code&redirect_uri=REDIRECT_URI&state=STATE_TOKEN  

This url points to the Facebook Authorisation server, and contains a number of replacement fields. The CLIENT_ID and REDIRECT_URI are the ones we registered and were provided when we registered our app in Facebook. The STATE_TOKEN is a CSRF token generated automatically by our application for security reasons (that I won't go into). Finally, the scope field indicates what resources we have requested access to - namely public_profile and their email.

Following this link, the user is directed in their browser to their Facebook login page. Once they have logged in, or if they are already logged in, they must grant authorisation to our registered ASP.NET application to access the requested fields:

An introduction to OAuth 2.0 using Facebook in ASP.NET Core

If the user clicks OK, then Facebook sends another 302 response to the browser, with a url similar to the following:

http://localhost:5000/signin-facebook?code=AUTH_CODE&state=STATE_TOKEN  

Facebook has provided an AUTH_CODE, along with the STATE_TOKEN we supplied with the initial redirect. The state can be verified to ensure that requests are not being forged by comparing it to the version stored in our session state in the ASP.NET application. The AUTH_CODE however is only temporary, and cannot be directly used to access the user details we need. Instead, we need to exchange it for an access token with the Facebook Authorisation server.

Exchanging for an access token

This next portion of the flow occurs entirely server side - communication occurs directly between our ASP.NET application and the Facebook authorisation server.

Our ASP.NET application constructs a POST request to the Facebook Authorisation server, to the access token endpoint. The request sends our app's registered details, including the CLIENT_SECRET and the AUTH_CODE, to the Facebook endpoint:

POST /v2.6/oauth/access_token HTTP/1.1  
Host: graph.facebook.com  
Content-Type: application/x-www-form-urlencoded

grant_type=authorization_code&  
code=AUTH_CODE&  
redirect_uri=REDIRECT_URI&  
client_id=CLIENT_ID&  
client_secret=CLIENT_SECRET  

If the authorisation code is accepted by Facebook's Authorisation server, then it will respond with (among other things) an ACCESS_TOKEN. This access token allows our ASP.NET application to access the resources (scopes) we requested at the beginning of the flow, but we don't actually have the details we need in order to create the Claims for our user yet.

Accessing the protected resource

After receiving and storing the access token, our app can now contact Facebook's Resource server. We are still completely server-side at this point, communicating directly with Facebook's user information endpoint.

Our application constructs a GET request, providing the ACCESS_TOKEN and a comma separated (and URL encoded) list of requested fields in the querystring:

GET /v2.6/me?access_token=ACCESS_TOKEN&fields=name%2Cemail%2Cfirst_name%2Clast_name  
Host: graph.facebook.com  

Assuming all is good, Facebook's resource server should respond with the requested fields. Your application can then add the appropriate Claims to the ClaimsIdentity and your user is authenticated!
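
For reference, the body returned by the /me endpoint for the fields requested above would look something like the following (illustrative values only):

{
  "id": "1234567890",
  "name": "Jo Bloggs",
  "email": "jo.bloggs@example.com",
  "first_name": "Jo",
  "last_name": "Bloggs"
}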

An introduction to OAuth 2.0 using Facebook in ASP.NET Core

The description provided here omits a number of things, such as handling expiration and refresh tokens, as well as the ASP.NET Core Identity process of associating the login with an email, but hopefully it provides an intermediate-level view of what is happening as part of a social login.

Example usage in ASP.NET Core

If you're anything like me, when you first start looking at how to implement OAuth in your application, it all seems a bit daunting. There are so many moving parts, different grants and back-channel communication that it seems like it will be a chore to set up.

Luckily, the ASP.NET Core team have solved a massive amount of the headache for you! If you are using ASP.NET Core Identity, then adding external providers is a breeze. The ASP.NET Core documentation provides a great walkthrough to creating your application and getting it all set up.

Essentially, if you have an app that uses ASP.NET Core Identity, all that is required to add Facebook authentication is to install the package in your project.json:

{
  "dependencies": {
    "Microsoft.AspNetCore.Authentication.Facebook": "1.0.0"
  }
}

and configure the middleware in your Startup.Configure method:

public void Configure(IApplicationBuilder app, IHostingEnvironment env)  
{

    app.UseStaticFiles();

    app.UseIdentity();

    app.UseFacebookAuthentication(new FacebookOptions
    {
        AppId = Configuration["facebook:appid"],
        AppSecret = Configuration["facebook:appsecret"],
        Scope = { "email" },
        Fields = { "name", "email" },
        SaveTokens = true,
    });

    app.UseMvc(routes =>
    {
        routes.MapRoute(
            name: "default",
            template: "{controller=Home}/{action=Index}/{id?}");
    });
}

You can see we are loading the AppId and AppSecret (our CLIENT_ID and CLIENT_SECRET) from configuration. On a development machine, these should be stored using the user secrets manager or environment variables (never commit them directly to your repository).
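
For example, on a development machine you could store these values with the user secrets manager (via the Microsoft.Extensions.SecretManager.Tools package), something along these lines:

dotnet user-secrets set facebook:appid YOUR_APP_ID
dotnet user-secrets set facebook:appsecret YOUR_APP_SECRET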

If you want to use a different external OAuth provider then you have several options. Microsoft provide a number of packages similar to the Facebook package shown which make integrating external logins simple. There are currently providers for Google, Twitter and (obviously) Microsoft accounts.

In addition, there are a number of open source libraries that provide similar handling of common providers. In particular, the AspNet.Security.OAuth.Providers repository has middleware for providers like GitHub, Foursquare, Dropbox and many others.

Alternatively, if a direct provider is not available, you can use the generic Microsoft.AspNetCore.Authentication.OAuth package on which these all build. For example Jerrie Pelser has an excellent post on configuring your ASP.NET Core application to use LinkedIn.

Registering your application with Facebook Graph API

As discussed previously, before you can use an OAuth provider, you must register your application with the provider to obtain the CLIENT_ID and CLIENT_SECRET, and to register your REDIRECT_URI. I will briefly show how to go about doing this for Facebook.

First, navigate to https://developers.facebook.com and login. If you have not already registered as a developer, you will need to register and agree to Facebook's policies.

An introduction to OAuth 2.0 using Facebook in ASP.NET Core

Once registered as a developer, you can create a new web application by following the prompts or navigating to https://developers.facebook.com/quickstarts/?platform=web. Here you will be prompted to provide a name for your web application, and then to configure some basic details about it.

An introduction to OAuth 2.0 using Facebook in ASP.NET Core

Once created, navigate to https://developers.facebook.com/apps and click on your application's icon. You will be taken to your app's basic details. Here you can obtain the App Id and App Secret you will need in your application. Make a note of them (store them using your secrets manager).

An introduction to OAuth 2.0 using Facebook in ASP.NET Core

The last step is to configure the redirect URI for your application. Click on '+ Add Product' at the bottom of the menu and choose Facebook Login. This will enable OAuth for your application, and allow you to set the REDIRECT_URI for your application.

The redirect path for the Facebook middleware is /signin-facebook. In my case, I was only running the app locally, so my full redirect url was http://localhost:5000/signin-facebook.

An introduction to OAuth 2.0 using Facebook in ASP.NET Core

Assuming everything is set up correctly, you should now be able to use OAuth 2.0 to login to your ASP.NET Core application with Facebook!

Final thoughts

In this post I showed how you could use OAuth 2.0 to allow users to login to your ASP.NET Core application with Facebook and other OAuth 2.0 providers.

One point which is often overlooked is the fact that OAuth 2.0 is a protocol for performing authorisation, not authentication. The whole process is aimed at providing access to protected resources, rather than proving the identity of a user, which has some subtle security implications.

Luckily there is another protocol, OpenID Connect, which deals with many of these issues by essentially providing an additional layer on top of the OAuth 2.0 protocol. I'll be doing a post on OpenID Connect soon, but if you want to learn more, I've provided some additional details below.

In the meantime, enjoy your social logins!


Pedro Félix: On contracts and HTTP APIs

Reading the twitter conversation started by this tweet

made me put in written words some of the ideas that I have about HTTP APIs, contracts and “out-of-band” information.
Since it’s vacations time, I’ll be brief and incomplete.

  • On any interface, it is impossible to avoid having contracts (i.e. shared “out-of-band” information) between provider and consumer. On a HTTP API, the syntax and semantics of HTTP itself is an example of this shared information. If JSON is used as a base for the representation format, then its syntax and semantics rules are another example of shared “out-of-band” information.
  • However not all contracts are equal in the generality, flexibility and evolvability they allow. Having the contract include a fixed resource URI is very different from having the contract define a link relation. The former prohibits any change to the URI structure (e.g. host name, HTTP vs HTTPS, embedded information), while the latter enables it (see the sketch after this list). Therefore, designing the contract is a very important task when creating HTTP APIs. And since the transfer contract is already rather well defined by HTTP, most of the design emphasis should be on the representation contract, including the hypermedia components.
  • Also, not all contracts have the same cost to implement (e.g. having hardcoded URIs is probably simpler than having to find links on representations), so (as usual) trade-offs have to be taken into account.
  • When implementing HTTP APIs it is also very important to have the contract-related areas clearly identified. For me, this typically involves being able to easily answer questions such as: will I be breaking the contract if
    • I change this property name on this model?
    • I add a new property to this model?
    • I change the routing rules (e.g. adding a new path segment)?
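
As a purely illustrative sketch of the difference (the URIs, field names and link format here are invented), a representation that exposes a link relation lets the client discover the URI at runtime instead of baking it into the contract:

{
  "id": 123,
  "status": "pending",
  "_links": {
    "payments": { "href": "https://api.example.com/orders/123/payments" }
  }
}

A client bound only to the "payments" link relation keeps working when the URI structure changes; a client that constructs the /orders/123/payments URI itself does not.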

Hope this helps.
Looking forward to feedback.

 



Damien Bowden: ASP.NET Core 1.0 with MySQL and Entity Framework Core

This article shows how to use MySQL with ASP.NET Core 1.0 using Entity Framework Core.

Code: https://github.com/damienbod/AspNet5MultipleProject

Thanks to Noah Potash for creating this example and adding his code to this code base.

The Entity Framework MySQL package can be downloaded using the NuGet package SapientGuardian.EntityFrameworkCore.MySql. At present no official provider from MySQL exists for Entity Framework Core which can be used in an ASP.NET Core application.

The SapientGuardian.EntityFrameworkCore.MySql package can be added to the project.json file.

{
  "dependencies": {
    "Microsoft.NETCore.App": {
      "version": "1.0.0",
      "type": "platform"
    },
    "DomainModel": "*",
    "SapientGuardian.EntityFrameworkCore.MySql": "7.1.4"
  },

  "frameworks": {
    "netcoreapp1.0": {
      "imports": [
        "dotnet5.6",
        "dnxcore50",
        "portable-net45+win8"
      ]
    }
  }
}

An EF Core DbContext can be added like any other context supported by Entity Framework Core.

using System;
using System.Linq;
using DomainModel.Model;
using Microsoft.EntityFrameworkCore;
using Microsoft.Extensions.Configuration;

namespace DataAccessMySqlProvider
{ 
    // >dotnet ef migration add testMigration
    public class DomainModelMySqlContext : DbContext
    {
        public DomainModelMySqlContext(DbContextOptions<DomainModelMySqlContext> options) :base(options)
        { }
        
        public DbSet<DataEventRecord> DataEventRecords { get; set; }

        public DbSet<SourceInfo> SourceInfos { get; set; }

        protected override void OnModelCreating(ModelBuilder builder)
        {
            builder.Entity<DataEventRecord>().HasKey(m => m.DataEventRecordId);
            builder.Entity<SourceInfo>().HasKey(m => m.SourceInfoId);

            // shadow properties
            builder.Entity<DataEventRecord>().Property<DateTime>("UpdatedTimestamp");
            builder.Entity<SourceInfo>().Property<DateTime>("UpdatedTimestamp");

            base.OnModelCreating(builder);
        }

        public override int SaveChanges()
        {
            ChangeTracker.DetectChanges();

            updateUpdatedProperty<SourceInfo>();
            updateUpdatedProperty<DataEventRecord>();

            return base.SaveChanges();
        }

        private void updateUpdatedProperty<T>() where T : class
        {
            var modifiedSourceInfo =
                ChangeTracker.Entries<T>()
                    .Where(e => e.State == EntityState.Added || e.State == EntityState.Modified);

            foreach (var entry in modifiedSourceInfo)
            {
                entry.Property("UpdatedTimestamp").CurrentValue = DateTime.UtcNow;
            }
        }
    }
}

In an ASP.NET Core web application, the DbContext is added to the application in the startup class. In this example, the DbContext is defined in a different class library. The MigrationsAssembly needs to be defined, so that the migrations will work. If the context and the migrations are defined in the same assembly, this is not required.

public Startup(IHostingEnvironment env)
{
	var builder = new ConfigurationBuilder()
		.SetBasePath(env.ContentRootPath)
		.AddJsonFile("appsettings.json", optional: true, reloadOnChange: true)
		.AddJsonFile("config.json", optional: true, reloadOnChange: true);

	Configuration = builder.Build();
}
		
public void ConfigureServices(IServiceCollection services)
{	
	var sqlConnectionString = Configuration.GetConnectionString("DataAccessMySqlProvider");

	services.AddDbContext<DomainModelMySqlContext>(options =>
		options.UseMySQL(
			sqlConnectionString,
			b => b.MigrationsAssembly("AspNet5MultipleProject")
		)
	);
}

The application uses the configuration from the config.json. This file is used to get the MySQL connection string, which is used in the Startup class.

{
    "ConnectionStrings": {
        "DataAccessMySqlProvider": "server=localhost;userid=damienbod;password=1234;database=damienbod;"
    }
}

MySQL Workbench can be used to add the schema ‘damienbod’ to the MySQL database. The user ‘damienbod’ is also required, which must match the user defined in the connection string. If you configure the MySQL database differently, then you need to change the connection string in the config.json file.
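
If you prefer to script this rather than use the Workbench UI, the schema and user matching the connection string above could be created with SQL along these lines (adjust the password and host to your own setup):

CREATE DATABASE damienbod;
CREATE USER 'damienbod'@'localhost' IDENTIFIED BY '1234';
GRANT ALL PRIVILEGES ON damienbod.* TO 'damienbod'@'localhost';
FLUSH PRIVILEGES;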

mySql_ercore_aspnetcore_01

Now the database migrations can be created and the database can be updated.

> dotnet ef migrations add testMySql
> dotnet ef database update

If successful, the tables are created.

mySql_ercore_aspnetcore_02

The MySQL provider can be used in an MVC 6 controller using constructor injection.

using System.Collections.Generic;
using DomainModel;
using DomainModel.Model;
using Microsoft.AspNetCore.Mvc;
using Newtonsoft.Json;

namespace AspNet5MultipleProject.Controllers
{
    [Route("api/[controller]")]
    public class DataEventRecordsController : Controller
    {
        private readonly IDataAccessProvider _dataAccessProvider;

        public DataEventRecordsController(IDataAccessProvider dataAccessProvider)
        {
            _dataAccessProvider = dataAccessProvider;
        }

        [HttpGet]
        public IEnumerable<DataEventRecord> Get()
        {
            return _dataAccessProvider.GetDataEventRecords();
        }

        [HttpGet]
        [Route("SourceInfos")]
        public IEnumerable<SourceInfo> GetSourceInfos(bool withChildren)
        {
            return _dataAccessProvider.GetSourceInfos(withChildren);
        }

        [HttpGet("{id}")]
        public DataEventRecord Get(long id)
        {
            return _dataAccessProvider.GetDataEventRecord(id);
        }

        [HttpPost]
        public void Post([FromBody]DataEventRecord value)
        {
            _dataAccessProvider.AddDataEventRecord(value);
        }

        [HttpPut("{id}")]
        public void Put(long id, [FromBody]DataEventRecord value)
        {
            _dataAccessProvider.UpdateDataEventRecord(id, value);
        }

        [HttpDelete("{id}")]
        public void Delete(long id)
        {
            _dataAccessProvider.DeleteDataEventRecord(id);
        }
    }
}
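
The IDataAccessProvider abstraction injected above is defined in the DomainModel class library of the sample repository; its exact definition is not shown in this post, but based on the calls made by the controller it would look roughly like the following sketch:

using System.Collections.Generic;
using DomainModel.Model;

namespace DomainModel
{
    public interface IDataAccessProvider
    {
        IEnumerable<DataEventRecord> GetDataEventRecords();
        IEnumerable<SourceInfo> GetSourceInfos(bool withChildren);
        DataEventRecord GetDataEventRecord(long dataEventRecordId);
        void AddDataEventRecord(DataEventRecord dataEventRecord);
        void UpdateDataEventRecord(long dataEventRecordId, DataEventRecord dataEventRecord);
        void DeleteDataEventRecord(long dataEventRecordId);
    }
}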

The controller api can be called using Fiddler:

POST http://localhost:5000/api/dataeventrecords HTTP/1.1
User-Agent: Fiddler
Host: localhost:5000
Content-Length: 135
Content-Type: application/json
 
{
  "DataEventRecordId":3,
  "Name":"Funny data",
  "Description":"yes",
  "Timestamp":"2015-12-27T08:31:35Z",
   "SourceInfo":
  { 
    "SourceInfoId":0,
    "Name":"Beauty",
    "Description":"second Source",
    "Timestamp":"2015-12-23T08:31:35+01:00",
    "DataEventRecords":[]
  },
 "SourceInfoId":0 
}

The data is added to the database as required.

mySql_ercore_aspnetcore_03

Links:

https://github.com/SapientGuardian/SapientGuardian.EntityFrameworkCore.MySql

http://dev.mysql.com/downloads/mysql/

Experiments with Entity Framework Core and ASP.NET Core 1.0 MVC

https://docs.efproject.net/en/latest/miscellaneous/connection-strings.html

HowTo: Starting with MySQL EF Core provider and Connector/Net 7.0.4



Andrew Lock: An introduction to Session storage in ASP.NET Core

An introduction to Session storage in ASP.NET Core

A common requirement of web applications is the need to store temporary state data. In this article I discuss the use of Session storage for storing data related to a particular user or browser session.

Options for storing application state

When building ASP.NET Core applications, there are a number of options available to you when you need to store data that is specific to a particular request or session.

One of the simplest methods is to use querystring parameters or post data to send state to subsequent requests. However doing so requires sending that data to the user's browser, which may not be desirable, especially for sensitive data. For that reason, extra care must be taken when using this approach.

Cookies can also be used to store small bits of data, though again, these make a roundtrip to the user's browser, so must be kept small, and if sensitive, must be secured.

For each request there exists a property Items on HttpContext. This is an IDictionary<string, object> which can be used to store arbitrary objects against a string key. The data stored here lasts for just a single request, so can be useful for communicating between middleware components and storing state related to just a single request.
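
As a minimal illustration (the middleware class and key name here are hypothetical), an earlier piece of middleware can stash a value in Items for anything later in the pipeline to read during the same request:

using System;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Http;

public class RequestTimingMiddleware
{
    private readonly RequestDelegate _next;

    public RequestTimingMiddleware(RequestDelegate next)
    {
        _next = next;
    }

    public async Task Invoke(HttpContext context)
    {
        // visible to later middleware and MVC actions, but only for this request
        context.Items["RequestStartedUtc"] = DateTime.UtcNow;

        await _next(context);
    }
}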

Files and database storage can obviously be used to store state data, whether related to a particular user or the application in general. However they are typically slower to store and retrieve data than other available options.

Session state relies on a cookie identifier to identify a particular browser session, and stores data related to the session on the server. This article focuses on how and when to use Session in your ASP.NET Core application.

Session in ASP.NET Core

ASP.NET Core supports the concept of a Session out of the box - the HttpContext object contains a Session property of type ISession. The get and set portion of the interface is shown below (see the full interface here):

public interface ISession  
{
    bool TryGetValue(string key, out byte[] value);
    void Set(string key, byte[] value);
    void Remove(string key);
}

As you can see, it provides a dictionary-like wrapper over the byte[] data, accessing state via string keys. Generally speaking, each user will have an individual session, so you can store data related to a single user in it. However you cannot technically consider the data secure as it may be possible to hijack another user's session, so it is not advisable to store user secrets in it. As the documentation states:

You can’t necessarily assume that a session is restricted to a single user, so be careful what kind of information you store in Session.

Another point to consider is that the session in ASP.NET Core is non-locking, so if multiple requests modify the session, the last action will win. This is an important point to consider, but should provide a significant performance increase over the locking session management used in the previous ASP.NET 4.X framework.

Under the hood, Session is built on top of IDistributedCache, which can be used as a more generalised cache in your application. ASP.NET Core ships with a number of IDistributedCache implementations, the simplest of which is an in-memory implementation, MemoryCache, which can be found in the Microsoft.Extensions.Caching.Memory package.

MVC also exposes a TempData property on a Controller which is an additional wrapper around Session. This can be used for storing transient data that only needs to be available for a single request after the current one.
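
As a quick illustration (using a hypothetical key), you might set a message in one action and have it available to the request that follows the redirect:

public IActionResult Save()
{
    // stored via Session and removed once it has been read on a subsequent request
    TempData["StatusMessage"] = "Your changes have been saved";
    return RedirectToAction("Index");
}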

Configuring your application to use Session

In order to be able to use Session storage in your application, you must configure the required Session services, the Session middleware, and an IDistributedCache implementation. In this example I will be using the in-memory distributed cache as it is simple to set up and use, but the documentation states that this should only be used for development and testing sites. I suspect this reticence is due to it not actually being distributed, and the fact that app restarts will clear the session.

First, add the IDistributedCache implementation and Session state packages to your project.json:

"dependencies": {  
  "Microsoft.Extensions.Caching.Memory" : "1.0.0",
  "Microsoft.AspNetCore.Session": "1.0.0"
}

Next, add the required services to Startup in ConfigureServices:

public void ConfigureServices(IServiceCollection services)  
{
    services.AddMvc();

    services.AddDistributedMemoryCache();
    services.AddSession();
}

Finally, configure the session middleware in the Startup.Configure method. As with all middleware, order is important in this method, so you will need to enable the session before you try and access it, e.g. in your MVC middleware:

public void Configure(IApplicationBuilder app)  
{
    app.UseStaticFiles();

    //enable session before MVC
    app.UseSession();

    app.UseMvc(routes =>
    {
        routes.MapRoute(
            name: "default",
            template: "{controller=Home}/{action=Index}/{id?}");
    });
}

With all this in place, the Session object can be used to store our data.

Storing data in Session

As shown previously, objects must be stored in Session as a byte[], which is obviously not overly convenient. To alleviate the need to work directly with byte arrays, a number of extensions exist for fetching and setting int and string. Storing more complex objects requires serialising the data.

As an example, consider the simple usage of session below.

public IActionResult Index()  
{
    const string sessionKey = "FirstSeen";
    DateTime dateFirstSeen;
    var value = HttpContext.Session.GetString(sessionKey);
    if (string.IsNullOrEmpty(value))
    {
        dateFirstSeen = DateTime.Now;
        var serialisedDate = JsonConvert.SerializeObject(dateFirstSeen);
        HttpContext.Session.SetString(sessionKey, serialisedDate);
    }
    else
    {
        dateFirstSeen = JsonConvert.DeserializeObject<DateTime>(value);
    }

    var model = new SessionStateViewModel
    {
        DateSessionStarted = dateFirstSeen,
        Now = DateTime.Now
    };

    return View(model);
}

This action simply returns a view with a model that shows the current time, and the time the session was initialised.

First, the Session is queried using GetString(key). If this is the first time that action has been called, the method will return null. In that case, we record the current date, serialise it to a string using Newtonsoft.Json, and store it in the session using SetString(key, value).

On subsequent requests, the call to GetString(key) will return our serialised DateTime which we can set on our view model for display. After the first request to our action, the DateSessionStarted property will differ from the Now property on our model:

An introduction to Session storage in ASP.NET Core

This was a very trivial example, but you can store any data that is serialisable to a byte[] in the Session. The JSON serialisation used here is an easy option as it is likely already used in your project. Obviously, serialising and deserialising large objects on every request could be a performance concern, so be sure to think about the implications of using Session storage in your application.
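
If you find yourself serialising objects like this in many places, you could wrap the pattern in a couple of extension methods, something along these lines (a hypothetical helper, not part of the framework):

using Microsoft.AspNetCore.Http;
using Newtonsoft.Json;

public static class SessionObjectExtensions
{
    public static void SetObject<T>(this ISession session, string key, T value)
    {
        // serialise the object to JSON and store it as a string
        session.SetString(key, JsonConvert.SerializeObject(value));
    }

    public static T GetObject<T>(this ISession session, string key)
    {
        // returns default(T) if the key is not present in the session
        var value = session.GetString(key);
        return value == null ? default(T) : JsonConvert.DeserializeObject<T>(value);
    }
}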

Customising Session configuration

When configuring your session in Startup, you can provide an instance of SessionOptions or a configuration lambda to the UseSession or AddSession calls respectively. This allows you to customise details about the session cookie that is used to track the session in the browser. For example, you can customise the cookie name, domain, path and how long the session may be idle before it expires. You will likely not need to change the defaults, but it may be necessary in some cases:

services.AddSession(opts =>  
    {
        opts.CookieName = ".NetEscapades.Session";
        opts.IdleTimeout = TimeSpan.FromMinutes(5);
    });

Note the cookie name is not the default .AspNetCore.Session:

An introduction to Session storage in ASP.NET Core

It's also worth noting that in ASP.NET Core 1.0, you cannot currently mark the cookie as Secure. This has been fixed here so should be in the 1.1.0 release (probably Q4 2016 / Q1 2017).

Summary

In this post we saw an introduction to using Session storage in an ASP.NET Core application. We saw how to configure the required services and middleware, and how to use it to store and retrieve simple strings to share state across requests.

As mentioned previously, it's important to not store sensitive user details in Session due to potential security issues, but otherwise it is a useful location for storage of serialisable data.

Further Reading


Taiseer Joudeh: Azure Active Directory B2C Overview and Policies Management – Part 1

Prior to joining Microsoft I was heavily involved in architecting and building a large scale HTTP API which would be consumed by a large number of mobile application consumers on multiple platforms (iOS, Android, and Windows Phone). Securing the API and architecting the Authentication and Authorization parts for the API was one of the large and challenging features which we built from scratch, as we only needed to support local database accounts (allowing users to log in using their own existing email/username and password). As well, writing proprietary code for each platform to consume the Authentication and Authorization endpoints, store the tokens, and refresh them silently was a bit challenging and required skilled mobile apps developers to implement it securely on the different platforms. Don’t ask me why we didn’t use Xamarin for cross-platform development, it is a long story 🙂 While developing the back-end API I learned that building an identity management solution is not a trivial feature, and it is better to outsource it to a cloud service provider if this is a feasible option and you want your dev team to focus on building what matters; your business features!

Recently Microsoft announced the general availability in North America data centers of a service named “Azure Active Directory B2C”, which in my humble opinion will fill the gap of having a cloud identity and access management service targeted especially at mobile apps and web developers who need to build apps for consumers; consumers who want to sign in with their existing email/usernames, create new app-specific local accounts, or use their existing social accounts (Facebook, Google, LinkedIn, Amazon, Microsoft account) to sign in to the mobile/web app.

Azure Active Directory B2C

Azure Active Directory B2C will allow backend developers to focus on the core business of their services while they outsource the identity management to Azure Active Directory B2C, including signing in, signing up, password reset, editing profiles, etc. One important feature to mention here is that the service can run on the Azure cloud while your HTTP API is hosted on-premise; there is no need to have everything in the cloud if your use case requires hosting your services on-premise. You can read more about all the features of Azure Active Directory B2C by visiting their official page.

Azure Active Directory B2C can integrate seamlessly with the new unified authentication library named MSAL (Microsoft Authentication Library). This library will help developers to obtain tokens from Active Directory, Azure Active Directory B2C, and MSA for accessing protected resources. The library will support different platforms, covering .NET 4.5+ (desktop apps and web apps), Windows Universal Apps, Windows Store apps (Windows 8 and above), iOS (via Xamarin), Android (via Xamarin), and .NET Core. The library is still in preview, so it should not be used in production applications yet.

So during this series of posts, I will be covering different aspects of Azure Active Directory B2C, as well as integrating it with MSAL (Microsoft Authentication Library) in different front-end platforms (a desktop application and a web application).

Azure Active Directory B2C Overview and Policies Management

The source code for this tutorial is available on GitHub.

The MVC APP has been published on Azure App Services, so feel free to try it out using the Base URL (https://aadb2cmvcapp.azurewebsites.net)

I broke down this series into multiple posts which I’ll be posting gradually, posts are:

What we’ll build in this tutorial?

During this post we will build a Web API 2 HTTP API which will be responsible for managing shipping orders (i.e. listing orders, adding new ones, etc.). The orders data will be stored in Azure Table Storage, while we will outsource all the identity management to Azure Active Directory B2C, where service users/consumers will rely on AAD B2C to sign up for new accounts using their app-specific email/password, and then log in using those app-specific accounts.

That said, we need front-end apps to manipulate orders and communicate with the HTTP API. We will build different types of apps during this series of posts, some of which will use MSAL.

So the components that all the tutorials will be built from are:

  • Azure Active Directory B2C tenant for identity management, it will act as our IdP (Identity Provider).
  • ASP.NET Web API 2 acting as HTTP API Service and secured by the Azure Active Directory B2C tenant.
  • Different front end apps which will communicate with Azure Active Directory B2C to sign-in users, obtain tokens, send them to the protected HTTP API, and retrieve results from the HTTP API and project it on the front end applications.

So let’s get our hands dirty and start building the tutorial.

Building the Back-end Resource (Web API)

Step 1: Creating the Web API Project

In this tutorial, I’m using Visual Studio 2015 and .NET Framework 4.5.2. To get started, create an empty solution and name it “WebApiAzureAcitveDirectoryB2C.sln”, then add a new empty ASP.NET Web application named “AADB2C.Api”. The selected template for the project will be the “Empty” template with no core dependencies. Check the image below:

VS2015 Web Api Template

Once the project has been created, click on its properties and set “SSL Enabled” to “True”, copy the “SSL URL” value, then right click on the project, select “Properties”, select the “Web” tab from the left side, paste the “SSL URL” value into the “Project Url” text field and click “Save”. We need to allow the https scheme locally when we debug the application. Check the image below:

Web Api SSL Enable

Note: If this is the first time you enable SSL locally, you might get prompted to install local IIS Express Certificate, click “Yes”.

Step 2: Install the needed NuGet Packages to bootstrap the API

This project is empty, so we need to install the NuGet packages needed to set up our Owin server and configure ASP.NET Web API 2 to be hosted within an Owin server. Open the NuGet Package Manager Console and install the packages below:

Install-Package Microsoft.AspNet.WebApi -Version 5.2.3
Install-Package Microsoft.AspNet.WebApi.Owin -Version 5.2.3
Install-Package Microsoft.Owin.Host.SystemWeb -Version 3.0.1

Step 3: Add Owin “Startup” Class

We need to build the API components ourselves because we didn’t use a ready-made template; this way is cleaner and you understand the need and use for each component you install in your solution. So add a new class named “Startup” containing the code below. Please note that the method “ConfigureOAuth” is left empty intentionally, as we will visit this class many times after we create our Azure Active Directory B2C tenant. What I need to do now is to build the API without any protection, then protect it with our new Azure Active Directory B2C IdP:

using Owin;
using System.Web.Http;

public class Startup
{
    public void Configuration(IAppBuilder app)
    {
        HttpConfiguration config = new HttpConfiguration();

        // Web API routes
        config.MapHttpAttributeRoutes();

        ConfigureOAuth(app);

        app.UseWebApi(config);
    }

    public void ConfigureOAuth(IAppBuilder app)
    {
        // Intentionally left empty for now - we will configure this once the
        // Azure Active Directory B2C tenant has been created.
    }
}

Step 4: Add support to store data on Azure Table Storage

Note: I have decided to store the fictitious data about customer orders in Azure Table Storage, as this service will be published online and I need to demonstrate how to distinguish users’ data based on the signed-in user. Feel free to use whatever permanent storage you like to complete this tutorial; the implementation here is simple, so you can replace it with SQL Server, MySQL, or any other NoSQL store.

So let’s add the needed NuGet packages which allow us to access Azure Table Storage from a .NET client. I recommend you refer to the official documentation if you need to read more about Azure Table Storage.

Install-Package WindowsAzure.Storage
Install-Package Microsoft.WindowsAzure.ConfigurationManager

Step 5: Add Web API Controller responsible for orders management

Now we want to add a controller which is responsible for orders management (adding orders, listing all orders which belong to a certain user). So add a new controller named “OrdersController” inside a folder named “Controllers” and paste the code below:

using System;
using System.Linq;
using System.Web.Http;
using Microsoft.Azure;
using Microsoft.WindowsAzure.Storage;
using Microsoft.WindowsAzure.Storage.Table;

[RoutePrefix("api/Orders")]
    public class OrdersController : ApiController
    {
        CloudTable cloudTable = null;

        public OrdersController()
        {
            // Retrieve the storage account from the connection string.
            CloudStorageAccount storageAccount = CloudStorageAccount.Parse(CloudConfigurationManager.GetSetting("StorageConnectionString"));

            // Create the table client.
            CloudTableClient tableClient = storageAccount.CreateCloudTableClient();

            // Retrieve a reference to the table.
            cloudTable = tableClient.GetTableReference("orders");

            // Create the table if it doesn't exist.
            // Uncomment the below line if you are not sure if the table has been created already
            // No need to keep checking that the table exists or not.
            //cloudTable.CreateIfNotExists();
        }

        [Route("")]
        public IHttpActionResult Get()
        {
         
            //This will be read from the access token claims.
            var userId = "TaiseerJoudeh";

            TableQuery <OrderEntity> query = new TableQuery<OrderEntity>()
                .Where(TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, userId));

            var orderEntities = cloudTable.ExecuteQuery(query).Select(
                o => new OrderModel() {
                OrderID = o.RowKey,
                ShipperName = o.ShipperName,
                ShipperCity = o.ShipperCity,
                TS = o.Timestamp
                });

            return Ok(orderEntities);
        }

        [Route("")]
        public IHttpActionResult Post (OrderModel order)
        {
            //This will be read from the access token claims.
            var userId = "TaiseerJoudeh";

            OrderEntity orderEntity = new OrderEntity(userId);

            orderEntity.ShipperName = order.ShipperName;
            orderEntity.ShipperCity = order.ShipperCity;

            TableOperation insertOperation = TableOperation.Insert(orderEntity);

            // Execute the insert operation.
            cloudTable.Execute(insertOperation);

            order.OrderID = orderEntity.RowKey;

            order.TS = orderEntity.Timestamp;

            return Ok(order);
        }
    }

    #region Classes

    public class OrderModel
    {
        public string OrderID { get; set; }
        public string ShipperName { get; set; }
        public string ShipperCity { get; set; }
        public DateTimeOffset TS { get; set; }
    }

    public class OrderEntity : TableEntity
    {
        public OrderEntity(string userId)
        {
            this.PartitionKey = userId;
            this.RowKey = Guid.NewGuid().ToString("N");
        }

        public OrderEntity() { }

        public string ShipperName { get; set; }

        public string ShipperCity { get; set; }

    }

    #endregion

What we have implemented above is very straightforward. In the constructor of the controller, we read the connection string for Azure Table Storage from the web.config and created a cloud table instance which references the table named “orders”. This table will hold the orders data.

The structure of the table (if you are thinking in a SQL context, even though Azure Table Storage is a NoSQL store) is simple, and it is represented by the class named “OrderEntity”: the “PartitionKey” represents the “UserId”, and the “RowKey” represents the “OrderId”. The “OrderId” will always contain an auto-generated value.

Please note the following: a) You should not store the connection string for the table storage in web.config; it is better to use Azure Key Vault as a secure way to store your keys, or you can set it from Azure App Settings if you are going to host the API on Azure. b) The “UserId” is fixed for now, but eventually it will be read from the authenticated user’s access token claims once we establish the IdP and configure our API to rely on Azure Active Directory B2C to protect it.

By taking a look at the “POST” action, you will notice that we are adding a new record to the table storage; the “UserId” is fixed for now and we will revisit and fix this. The same applies to the “GET” action, where we read the data from Azure Table Storage for a fixed user.

Now the API is ready for testing. You can issue a GET or POST request and the data will be stored under the fixed “UserId”, which is “TaiseerJoudeh”. Note that there is no Authorization header set, as the API is still publicly available to anyone. Below is a reference for the POST request:

POST Request:

POST /api/orders HTTP/1.1
Host: localhost:44339
Content-Type: application/json
Cache-Control: no-cache
Postman-Token: 6f1164fa-8560-98fd-6566-892517f1003e

{
    "shipperName" :"Nike",
    "shipperCity": "Clinton"
}

Configuring the Azure Active Directory B2C Tenant

Step 5: Create an Azure Active Directory B2C tenant

Now we need to create the Azure Active Directory B2C tenant. For the time being you can create it from the Azure Classic Portal, and you will be able to manage all the settings from the new Azure Preview Portal.

  • To start the creation process login to the classic portal and navigate to: New > App Services > Active Directory > Directory > Custom Create as the image below:

Azure AD B2C Directory

  • A new popup will appear, as in the image below, asking you to fill in some information. Note that if you selected one of the following countries (United States, Canada, Costa Rica, Dominican Republic, El Salvador, Guatemala, Mexico, Panama, Puerto Rico and Trinidad and Tobago) your Azure AD B2C will be a Production-Scale tenant, as Azure AD B2C is GA only in the countries listed (North America). This will change in the coming months and more countries will be announced as GA. You can read more about the road map of Azure AD B2C here. Do not forget to check “This is a B2C directory” for sure 🙂

Azure AD B2C New Directory

  • After your tenant has been created, it will appear in the Active Directory extension bar, as in the image below. Select the tenant, click on the “Configure” tab, then click on “Manage B2C Settings”. This will open the new Azure Preview Portal, where we will start registering the App and managing policies.

Azure AD B2C Manage Settings

Step 6: Register our application in Azure AD B2C tenant

Now we need to register the application under the tenant we’ve created; this will allow us to add the sign-in, sign-up, and edit profile features to our app. To do so, follow the steps below:

  • Select “Applications” from the “Settings” blade for the B2C tenant we’ve created, then click on the “Add” Icon on the top
  • A new blade will open asking you to fill the following information
    • Name: This will be the application name that will describe your application to consumers. In our case I have used “BitofTech Demo App”
    • Web API/Web APP: we need to turn this on as we are protecting a Web Api and Web app.
    • Allow implicit flow: We will turn this on as well as we need to use OpenId connect protocol to obtain an id token
    • Reply URL: those are the registered URLs where the Azure Active Directory B2C will send the authentication response to (tokens) or error responses to. The client applications calling the API can specify the Reply URL, but it should be registered in the tenant by the administrator in order to work. In our case I will put the Reply URL now to the Web API URL which is “https://localhost:44339/” this will be good for testing purposes but in the next post I will add another URL for the Web application we will build to consume the API. As you notice you can register many Reply URLs so you can support different environments (Dev, staging, production, etc…)
    • Native Client: You need to turn this on if you are building mobile application or desktop application client, for the mean time there is no need to turn it on as we are building web application (Server side app) but we will visit this again in the coming posts and enable this once we build a desktop app to consume the API.
    • App key or App secret: This will be used to generate a “Client Secret” for the App which is needed to authenticate the App in the Authorization/Hybrid OAuth 2.0 flow. We will need this in the future posts once I describe how we can obtain access tokens, open id tokens and refresh tokens using Raw HTTP requests. For the mean time, there is no need to generate an App key.
  • Once you fill in all the information, click “Save” and the application will be created and an Application ID will be generated. Copy this value and keep it in a notepad, as we will use it later on.
  • Below is an image which shows the App after filling in the needed information:

Azure AD B2C New App

Step 7: Selecting Identity Providers

Azure Active Directory B2C offers multiple social identity providers (Microsoft, Google, Amazon, LinkedIn and Facebook) in addition to the local app-specific accounts. A local account can be configured to use a “Username” or “Email” as the unique attribute for the account; we will use “Email”, and we will use only local accounts in this tutorial to keep things simple and straightforward.

You can change the “Identity Providers” by selecting the “Identity providers” blade. This link will be helpful if you need to configure it.

Step 8: Add custom attributes

The Azure AD B2C directory comes with a set of “built-in” attributes that represent information about the user, such as Email, First name, Last name, etc. Those attributes can be extended in case you need to add extra information about the user upon signing up (creating a profile) or editing it.

At the moment you can only create an attribute with the data type “String”; I believe this limitation will be resolved in coming releases.

To do so, select the “User attributes” blade and click on the “Add” icon; a new blade will open asking you to fill in the attribute name, data type and description. In our case, I’ve added an attribute named “Gender” to capture the gender of the user during the registration process (profile creation or sign up). Below is an image which represents this process:

B2C Custom Attribute

We will see in the next steps how we can retrieve this custom attribute value in our application. There are two ways to do so: the first is to include it in the claims encoded in the token, and the second is to use the Azure AD Graph API. We will use the first method.

In the next step, I will show you how to include this custom attribute in the sign-up policy.

Step 9: Creating different policies

The unique thing about Azure Active Directory B2C is its extensible policy framework, which allows developers to define an easy and reusable way to build the identity experience they want to provide for application consumers (end users). So for example, to enroll a new user in your app and create an app-specific local account, you create a Sign-up policy where you configure the attributes you want to capture from the user, the attributes (claims) you need returned after successfully executing the policy, and which identity providers consumers are allowed to use. You can also configure the look and feel of the signup page by making simple modifications such as changing label names and the order of the fields, or by replacing the UI entirely (more about this in a future post). All this applies to the other policies used to implement identity features such as signing in and editing the profile.

Using the extensible policy framework we can also create multiple policies of different types in our tenant and use them in our applications as needed. Policies can be reused across applications, and they can also be exported and uploaded for easier management. This allows us to define and modify identity experiences with minimal or no changes to application code.

Now let’s create the first policy, the “Sign-up” policy, which will build the experience for users during the signup process, and I’ll show you how to test it out. To do so, follow the steps below:

  • Select the “Sign-up” policies.
  • Click on the “Add” icon at the top of the blade.
  • Select a name for the policy; picking a clear name is important as we will reference it in our application. In our case I’ve used “signup”.
  • Select the “Identity providers” and select “Email signup”. In our case this is the only provider we have configured for this tenant so far.
  • Select the “Sign-up” attributes. Now we have the chance to choose the attributes we want to collect from the user during the signup process. I have selected 6 attributes as in the image below.
  • Select the “Application claims”. Now we have the chance to choose the claims we want returned in the tokens sent back to our application after a successful signup process. Remember that those claims are encoded within the token, so do not go crazy adding many claims as the token size will increase. I have selected 9 claims as in the image below.
  • Finally, click on “Create” button.

Signup Policy Attribute

Notes:

  • The policy that will be created will be named “B2C_1_signup”; all the policies will be prefixed by the “B2C_1_” fragment. Do not ask me why, but it seems it is an implementation detail 🙂
  • You can change the attribute label names (Surname -> Last Name) as well as change the order of the fields by dragging the attributes, and set whether a field is mandatory or not. Notice how I changed the custom attribute “Gender” to display as a drop down list with fixed items such as “Male” and “Female”. All this can be done by selecting the “Page UI customization” section.
  • Once the policy has been created you can configure the ID token and refresh token expiration times by selecting the “Token, session & SSO config” section. I will cover this in the coming posts; for now we will keep the defaults for all the policies we will create, and you can read more about this here.
  • Configuring ID token and refresh token expiration times is done per policy, not per tenant. IMO I do not know why this was not done per tenant rather than per policy; this for sure gives you better flexibility and finer grained control over how to manage policies, but I cannot think of a use case where you would want different expiration times for different policies. We will keep them the same for all the policies we will create unless we are testing something out.
  • Below is an image showing how to change the order of the custom attribute “Gender” among the other fields, as well as how to set the “User input type” to use a drop down list:

Azure B2C Edit Attribute

Step 10: Creating the Sign in and Edit Profile policies

I won’t bore you with the repeated details of creating the other 2 policies which we will be using during this tutorial; they all follow the same approach I illustrated in the previous step. Please note the following about the newly created policies:

  • The policy which will be used to sign in the user (login) will be named “Signin“, so after creating it will be named “B2C_1_Signin“.
  • The policy which will be used to edit the created profile will be named “Editprofile“, so after creating it will be named “B2C_1_Editprofile“.
  • Do not forget to configure the Gender custom attribute for the “Editprofile” policy, as we need to display the values in the drop-down list instead of a text box.
  • Select the same claims we have already selected for the signup policy (8 claims)
  • You can click the “Run now” button and test the new policies using a user that you have already created from the sign up policy (see the next step first).
  • For the time being, the only way to execute and test those policies in this post and the coming one is to use the “Run now” button, until I build a web application which communicates with the Web API and the Azure Active Directory B2C tenant.

Step 11: Testing the created signup policy in Azure AD B2C tenant

Azure Active Directory B2C provides us with the ability to test the policies locally without leaving the Azure portal. To do so, all you need to click on is the “Run now” button and select the preferred Reply URL, in case you registered multiple Reply URLs when you registered the App; in our case we have only a single app and a single Reply URL. The Id token will be returned as a hash fragment to the selected Reply URL.

Once you click the “Run now” button a new window will open and you will be able to test the sign up policy by filling in the needed information. Notice that you need to use a real email, so that an activation code can be sent to it to verify that you own this email. I believe the Azure AD team implemented email verification before creating the account to avoid creating many unreal emails that will never get verified. Smart decision.

Once you receive the verification email with the six digit code, you need to enter it in the verification code text box and click on “Verify”. If all is good, the “Create” button is enabled and you can complete filling in the profile. You can change the content of the email by following this link.

The password policy (complexity) used here is the same one used in Azure Active Directory, you can read more about it here.

After you fill in all the mandatory attributes as in the image below, click “Create” and you will notice that a redirect takes place to the Reply URL, with an Id token returned as a hash fragment. This Id token contains all the claims specified in the policy; you can inspect it using a JWT debugging tool such as calebb.net. If we debug the token we received after running the sign up policy, we see all the claims we asked for encoded in this JWT token.

Azure AD B2C Signup Test

Notes about the claims:

  • The new “Gender” custom attribute we have added is returned in a claim named “extension_Gender”. It seems that all the custom attributes are prefixed by the phrase “extension”; I need to validate this with the Azure AD team.
  • The globally user unique identifier is returned in the claim named “oid”, we will depend on this claim value to distinguish between registered users.
  • This token is generated based on the policy named “B2C_1_signup”, note the claim named “tfp”.

To have a better understanding of each claim meaning, please check this link.

{
  "exp": 1471954089,
  "nbf": 1471950489,
  "ver": "1.0",
  "iss": "https://login.microsoftonline.com/tfp/3d960283-c08d-4684-b378-2a69fa63966d/b2c_1_signup/v2.0/",
  "sub": "Not supported currently. Use oid claim.",
  "aud": "bc348057-3c44-42fc-b4df-7ef14b926b78",
  "nonce": "defaultNonce",
  "iat": 1471950489,
  "auth_time": 1471950489,
  "oid": "31ef9c5f-6416-48b8-828d-b6ce8db77d61",
  "emails": [
    "ahmad.hasan@gmail.com"
  ],
  "newUser": true,
  "given_name": "Ahmad",
  "family_name": "Hasan",
  "extension_Gender": "M",
  "name": "Ahmad Hasan",
  "country": "Jordan",
  "tfp": "B2C_1_signup"
}

This post turned out to be longer than anticipated, so I will continue in the coming post, where I will show you how to reconfigure our Web API project to rely on our Azure AD B2C IdP and validate those tokens.

The source code for this tutorial is available on GitHub.

The MVC APP has been published on Azure App Services, so feel free to try it out using the Base URL (https://aadb2cmvcapp.azurewebsites.net)

Follow me on Twitter @tjoudeh

Resources

The post Azure Active Directory B2C Overview and Policies Management – Part 1 appeared first on Bit of Technology.


Andrew Lock: A look behind the JWT bearer authentication middleware in ASP.NET Core

A look behind the  JWT bearer authentication middleware in ASP.NET Core

This is the next in a series of posts about Authentication and Authorisation in ASP.NET Core. In the first post we had a general introduction to authentication in ASP.NET Core, and then in the previous post we looked in more depth at the cookie middleware, to try and get to grips with the process under the hood of authenticating a request.

In this post, we take a look at another middleware, the JwtBearerAuthenticationMiddleware, again looking at how it is implemented in ASP.NET Core as a means to understanding authentication in the framework in general.

What is Bearer Authentication?

The first concept to understand is Bearer authentication itself, which uses bearer tokens. According to the specification, a bearer token is:

A security token with the property that any party in possession of the token (a "bearer") can use the token in any way that any other party in possession of it can. Using a bearer token does not require a bearer to prove possession of cryptographic key material (proof-of-possession).

In other words, by presenting a valid token you will be automatically authenticated, without having to match or present any additional signature or details to prove it was granted to you. It is often used in the OAuth 2.0 authorisation framework, such as you might use when signing in to a third-party site using your Google or Facebook accounts for example.

In practice, a bearer token is usually presented to the remote server using the HTTP Authorization header:

Authorization: Bearer BEARER_TOKEN  

where BEARER_TOKEN is the actual token. An important point to bear in mind is that bearer tokens entitle whoever is in their possession to access the resources they protect. That means you must be sure to only use tokens over SSL/TLS to ensure they cannot be intercepted and stolen.

What is a JWT?

A JSON Web Token (JWT) is a web standard that defines a method for transferring claims as a JSON object in such a way that they can be cryptographically signed or encrypted. It is used extensively in the internet today, in particular in many OAuth 2 implementations.

JWTs consist of 3 parts:

  1. Header: A JSON object which indicates the type of the token (JWT) and the algorithm used to sign it
  2. Payload: A JSON object with the asserted Claims of the entity
  3. Signature: A string created using a secret and the combined header and payload. Used to verify the token has not been tampered with.

These are then base64Url encoded and separated with a '.'. Using JSON Web Tokens allows you to send claims in a relatively compact way, and to protect them against modification using the signature. One of their main advantages is that they allow stateless applications by storing the required claims in the token, rather than server side in a session store.

I won't go into all the details of JWT tokens, or the OAuth framework here, as that is a huge topic on its own. In this post I'm more interested in how the middleware and handlers interact with the ASP.NET Core authentication framework. If you want to find out more about JSON web tokens, I recommend you check out jwt.io and auth0.com as they have some great information and tutorials.

Just to give a vague idea of what a JSON Web Token looks like in practice, the header and payload given below:

{
  "alg": "HS256",
  "typ": "JWT"
}
{
  "name": "Andrew Lock"
}

could be encoded in the following header:

Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJuYW1lIjoiQW5kcmV3IExvY2sifQ.RJJq5u9ITuNGeQmWEA4S8nnzORCpKJ2FXUthuCuCo0I

JWT bearer authentication in ASP.NET Core

You can add JWT bearer authentication to your ASP.NET Core application using the Microsoft.AspNetCore.Authentication.JwtBearer package. This provides middleware to allow validating and extracting JWT bearer tokens from a header. There is currently no built-in mechanism for generating the tokens from your application, but if you need that functionality, there are a number of possible projects and solutions to enable that such as IdentityServer 4. Alternatively, you could create your own token middleware as is shown in this post.

Once you have added the package to your project.json, you need to add the middleware to your Startup class. This will allow you to validate the token and, if valid, create a ClaimsPrincipal from the claims it contains.

You can add the middleware to your application using the UseJwtBearerAuthentication extension method in your Startup.Configure method, passing in a JwtBearerOptions object:

app.UseJwtBearerAuthentication(new JwtBearerOptions  
{
    AutomaticAuthenticate = true,
    AutomaticChallenge = true,
    TokenValidationParameters = new TokenValidationParameters
    {
        ValidateIssuer = true,
        ValidIssuer = "https://issuer.example.com",

        ValidateAudience = true,
        ValidAudience = "https://yourapplication.example.com",

        ValidateLifetime = true,
    }
});

There are many options available on the JwtBearerOptions - we'll cover some of these in more detail later.

The JwtBearerMiddleware

In the previous post we saw that the CookieAuthenticationMiddleware inherits from the base AuthenticationMiddleware<T>, and the JwtBearerMiddleware is no different. When created, the middleware performs various precondition checks and initialises some default values. The most important check is to initialise the ConfigurationManager, if it has not already been set.

The ConfigurationManager object is responsible for retrieving, refreshing and caching the configuration metadata required to validate JWTs, such as the issuer and signing keys. These can either be provided directly to the ConfigurationManager by configuring the JwtBearerOptions.Configuration property, or by using a back channel to fetch the required metadata from a remote endpoint. The details of this configuration are outside the scope of this article.
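
In practice, the most common way to feed the ConfigurationManager is to point the options at the token issuer and let the middleware fetch the discovery metadata itself over the back channel. A minimal sketch, assuming an OpenID Connect compliant issuer (the URLs are placeholders):

app.UseJwtBearerAuthentication(new JwtBearerOptions
{
    // The middleware builds a ConfigurationManager from the Authority and
    // downloads the issuer and signing keys from its discovery document
    Authority = "https://issuer.example.com",
    Audience = "https://yourapplication.example.com",
    RequireHttpsMetadata = true
});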

As in the cookie middleware, the middleware implements the only required method from the base class, CreateHandler(), and returns a newly instantiated JwtBearerHandler.

The JwtBearerHandler HandleAuthenticateAsync method

Again, as with the cookie authentication middleware, the handler is where all the work really takes place. JwtBearerHandler derives from AuthenticationHandler<JwtBearerOptions>, overriding the required HandleAuthenticateAsync() method.

This method is responsible for deserialising the JSON Web Token, validating it, and creating an appropriate AuthenticateResult with an AuthenticationTicket (if the validation was successful). We'll walk through the bulk of it in this section, but it is pretty long, so I'll gloss over some of it!

On MessageReceived

The first section of the HandleAuthenticateAsync method allows you to customise the whole bearer authentication process.

// Give application opportunity to find from a different location, adjust, or reject token
var messageReceivedContext = new MessageReceivedContext(Context, Options);

// event can set the token
await Options.Events.MessageReceived(messageReceivedContext);  
if (messageReceivedContext.CheckEventResult(out result))  
{
    return result;
}

// If application retrieved token from somewhere else, use that.
token = messageReceivedContext.Token;  

This section calls out to the MessageReceived event handler on the JwtBearerOptions object. You are provided the full HttpContext, as well as the JwtBearerOptions object itself. This allows you a great deal of flexibility in how your application uses tokens. You could validate the token yourself, using any other side information you may require, and set the AuthenticateResult explicitly. If you take this approach and handle the authentication yourself, the method will just directly return the AuthenticateResult after the call to messageReceivedContext.CheckEventResult.

Alternatively, you could obtain the token from somewhere else, such as a different header, or even a cookie. In that case, the handler will use the provided token for all further processing.
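
As a rough sketch of that second approach (my own example, not code from the handler), the MessageReceived event could pull the token from a query string parameter - handy for WebSockets, where setting headers is awkward. The parameter name here is purely illustrative:

app.UseJwtBearerAuthentication(new JwtBearerOptions
{
    AutomaticAuthenticate = true,
    Events = new JwtBearerEvents
    {
        OnMessageReceived = context =>
        {
            // Look for the token in the query string instead of the Authorization header
            var token = context.HttpContext.Request.Query["access_token"];
            if (!string.IsNullOrEmpty(token))
            {
                context.Token = token;
            }
            return Task.FromResult(0);
        }
    }
});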

Read Authorization header

In the next section, assuming a token was not provided by the messageReceivedContext, the method tries to read the token from the Authorization header:

if (string.IsNullOrEmpty(token))  
{
    string authorization = Request.Headers["Authorization"];

    // If no authorization header found, nothing to process further
    if (string.IsNullOrEmpty(authorization))
    {
        return AuthenticateResult.Skip();
    }

    if (authorization.StartsWith("Bearer ", StringComparison.OrdinalIgnoreCase))
    {
        token = authorization.Substring("Bearer ".Length).Trim();
    }

    // If no token found, no further work possible
    if (string.IsNullOrEmpty(token))
    {
        return AuthenticateResult.Skip();
    }
}

As you can see, if the header is not found, or it does not start with the string "Bearer ", then the remainder of the authentication is skipped. Authentication is then left to any other authentication middleware registered in the pipeline.

Update TokenValidationParameters

At this stage we have a token, but we still need to validate and deserialise it to a ClaimsPrincipal. The next section of HandleAuthenticateAsync uses the ConfigurationManager object created when the middleware was instantiated to update the issuer and signing keys that will be used to validate the token:

if (_configuration == null && Options.ConfigurationManager != null)  
{
    _configuration = await Options.ConfigurationManager.GetConfigurationAsync(Context.RequestAborted);
}

var validationParameters = Options.TokenValidationParameters.Clone();  
if (_configuration != null)  
{
    if (validationParameters.ValidIssuer == null && !string.IsNullOrEmpty(_configuration.Issuer))
    {
        validationParameters.ValidIssuer = _configuration.Issuer;
    }
    else
    {
        var issuers = new[] { _configuration.Issuer };
        validationParameters.ValidIssuers = (validationParameters.ValidIssuers == null ? issuers : validationParameters.ValidIssuers.Concat(issuers));
    }

    validationParameters.IssuerSigningKeys = (validationParameters.IssuerSigningKeys == null ? _configuration.SigningKeys : validationParameters.IssuerSigningKeys.Concat(_configuration.SigningKeys));
}

First _configuration, a private field, is updated with the latest (cached) configuration details from the ConfigurationManager. The TokenValidationParameters specified when configuring the middleware are then cloned for this request and augmented with the additional configuration. Any other validation requirements specified when the middleware was added will also be enforced (for example, we included ValidateIssuer, ValidateAudience and ValidateLifetime requirements in the example above).

Validating the token

Everything is now set for validating the provided token. The JwtBearerOptions object contains a list of ISecurityTokenValidators, so you can potentially use custom token validators, but the default is to use the built-in JwtSecurityTokenHandler. This will validate the token, confirm it meets all the requirements and has not been tampered with, and then return a ClaimsPrincipal.

List<Exception> validationFailures = null;  
SecurityToken validatedToken;  
foreach (var validator in Options.SecurityTokenValidators)  
{
    if (validator.CanReadToken(token))
    {
        ClaimsPrincipal principal;
        try
        {
            principal = validator.ValidateToken(token, validationParameters, out validatedToken);
        }
        catch (Exception ex)
        {
            //... Logging etc

            validationFailures = validationFailures ?? new List<Exception>(1);
            validationFailures.Add(ex);
            continue;
        }

        // See next section - returning a success result.
    }
}

So for each ISecurityTokenValidator in the list, we check whether it can read the token, and if so attempt to validate and deserialise the principal. If that is successful, we continue on to the next section; if not, the call to ValidateToken will throw.

Thankfully, the built-in JwtSecurityTokenHandler handles all the complicated details of implementing the JWT specification correctly, so as long as the ConfigurationManager is correctly set up, you should be able to validate most types of token.

I've glossed over the catch block somewhat, but we log the error, add it to the validationFailures error collection, potentially refresh the configuration from the ConfigurationManager, and try the next validator.

When validation is successful

If we successfully validate a token in the loop above, then we can create an authentication ticket from the principal provided.

Logger.TokenValidationSucceeded();

var ticket = new AuthenticationTicket(principal, new AuthenticationProperties(), Options.AuthenticationScheme);  
var tokenValidatedContext = new TokenValidatedContext(Context, Options)  
{
    Ticket = ticket,
    SecurityToken = validatedToken,
};

await Options.Events.TokenValidated(tokenValidatedContext);  
if (tokenValidatedContext.CheckEventResult(out result))  
{
    return result;
}
ticket = tokenValidatedContext.Ticket;

if (Options.SaveToken)  
{
    ticket.Properties.StoreTokens(new[]
    {
        new AuthenticationToken { Name = "access_token", Value = token }
    });
}

return AuthenticateResult.Success(ticket);  

Rather than returning a success result straight away, the handler first calls the TokenValidated event handler. This allows us to fully customise the extracted ClaimsPrincipal, even replacing it completely, or rejecting it at this stage by creating a new AuthenticateResult.
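
As an illustration of that extensibility (again, my own sketch rather than code from the handler), the TokenValidated event could add an extra claim to the principal before the ticket is returned; this would be set on the JwtBearerOptions when adding the middleware:

Events = new JwtBearerEvents
{
    OnTokenValidated = context =>
    {
        // context.Ticket.Principal is the ClaimsPrincipal built from the token
        var identity = (ClaimsIdentity)context.Ticket.Principal.Identity;
        identity.AddClaim(new Claim("token_source", "jwt-bearer"));
        return Task.FromResult(0);
    }
}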

Finally the handler optionally stores the extracted token in the AuthenticationProperties of the AuthenticationTicket for use elsewhere in the framework, and returns the authenticated ticket using AuthenticateResult.Success.

When validation fails

If the security token could not be validated by any of the ISecurityTokenValidators, the handler gives one more chance to customise the result.

if (validationFailures != null)  
{
    var authenticationFailedContext = new AuthenticationFailedContext(Context, Options)
    {
        Exception = (validationFailures.Count == 1) ? validationFailures[0] : new AggregateException(validationFailures)
    };

    await Options.Events.AuthenticationFailed(authenticationFailedContext);
    if (authenticationFailedContext.CheckEventResult(out result))
    {
        return result;
    }

    return AuthenticateResult.Fail(authenticationFailedContext.Exception);
}

return AuthenticateResult.Fail("No SecurityTokenValidator available for token: " + token ?? "[null]");  

The AuthenticationFailed event handler is invoked, and again can set the AuthenticateResult directly. If the handler does not directly handle the event, or if there were no configured ISecurityTokenValidators that could handle the token, then authentication has failed.

Also worth noting is that any unexpected exceptions thrown from event handlers etc will result in a similar call to Options.Events.AuthenticationFailed before the exception bubbles up the stack.

The JwtBearerHandler HandleUnauthorizedAsync method

The other significant method in the JwtBearerHandler is HandleUnauthorizedAsync, which is called when a request requires authorisation but is unauthenticated. In the CookieAuthenticationMiddleware this method redirects to a logon page, while in the JwtBearerHandler a 401 will be returned, with the WWW-Authenticate header indicating the nature of the error, as per the specification.

Prior to returning a 401, the Options.Event handler gets one more attempt to handle the request with a call to Options.Events.Challenge. As before, this provides a great extensibility point should you need it, allowing you to customise the behaviour to your needs.

SignIn and SignOut

The last two methods in the JwtBearerHandler, HandleSignInAsync and HandleSignOutAsync, simply throw a NotSupportedException when called. This makes sense when you consider that the tokens have to come from a different source.

To effectively 'sign in', a client must request a token from the (remote) issuer and provide it when making requests to your application. Signing out from the handler's point of view would just require you to discard the token, and not send it with future requests.
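
From the client's point of view, 'signing in' therefore just means attaching the token it previously obtained from the issuer to each request. A minimal sketch (the URL is a placeholder and accessToken is assumed to have been acquired already):

using System.Net.Http;
using System.Net.Http.Headers;

var client = new HttpClient();
// Present the bearer token on every request to the protected API
client.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue("Bearer", accessToken);
var response = await client.GetAsync("https://yourapplication.example.com/api/values");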

Summary

In this post we looked in detail at the JwtBearerHandler as a means to further understanding how authentication works in the ASP.NET Core framework. It is rare you would need to dive into this much detail when simply using the middleware, but hopefully it will help you get to grips with what is going on under the hood when you add it to your application.


Pedro Félix: Focus on the representation semantics, leave the transfer semantics to HTTP

A couple of days ago I was reading the latest OAuth 2.0 Authorization Server Metadata document version and my eye got caught on one sentence. On section 3.2, the document states

A successful response MUST use the 200 OK HTTP status code and return a JSON object using the “application/json” content type (…)

My first reaction was thinking that this specification was being redundant: of course a 200 OK HTTP status should be returned on a successful response. However, that “MUST” in the text made me think: is a 200 really the only acceptable response status code for a successful response? In my opinion, the answer is no.

For instance, if caching and ETags are being used, the client can send a conditional GET request (see Hypertext Transfer Protocol (HTTP/1.1): Conditional Requests) using the If-None-Match header, for which a 304 (Not Modified) status code is perfectly acceptable. Another example is if the metadata location changes and the server responds with a 301 (Moved Permanently) or a 302 (Found) status code. Does that mean the request was unsuccessful? In my opinion, no. It just means that the request should be followed by a subsequent request to another location.
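
To make the conditional request example concrete, here is a small client-side sketch (my own, with a placeholder URL and ETag) that sends If-None-Match and treats 304 as a perfectly successful outcome:

using System.Net;
using System.Net.Http;
using System.Net.Http.Headers;

var request = new HttpRequestMessage(HttpMethod.Get, "https://issuer.example.com/.well-known/openid-configuration");
request.Headers.IfNoneMatch.Add(new EntityTagHeaderValue("\"previously-seen-etag\""));

var response = await new HttpClient().SendAsync(request);
if (response.StatusCode == HttpStatusCode.NotModified)
{
    // 304: the cached metadata is still valid - the request succeeded
}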

So, why does this little observation deserve a blog post?
Well, mainly because it reflects two common tendencies when designing HTTP APIs (or HTTP interfaces):

  • First, the tendency to redefine transfer semantics that are already defined by HTTP.
  • Secondly, a very simplistic view of HTTP, ignoring parts such as caching and optimistic concurrency.

The HTTP specification already defines a quite rich set of mechanisms for representation transfer, and HTTP related specifications should take advantage of that. What HTTP does not define is the semantics of the representation itself. That should be the focus of specifications such as the OAuth 2.0 Authorization Server Metadata.

When defining HTTP APIs, focus on the representation semantics. The transfer semantics is already defined by the HTTP protocol.

 



Dominick Baier: Why does my Authorize Attribute not work?

Sad title, isn’t it? The alternative would have been “The complicated relationship between claim types, ClaimsPrincipal, the JWT security token handler and the Authorize attribute role checks” – but that wasn’t very catchy.

But the reality is that many people are struggling with getting role-based authorization (e.g. [Authorize(Roles = "foo")]) to work – especially with external authentication like IdentityServer or other identity providers.

To fully understand the internals I have to start at the beginning…

IPrincipal
When .NET 1.0 shipped, it had a very rudimentary authorization API based on roles. Microsoft created the IPrincipal interface which specified a bool IsInRole(string roleName). They also created a couple of implementations for doing role-based checks against Windows groups (WindowsPrincipal) and custom data stores (GenericPrincipal).

The idea behind putting that authorization primitive into a formal interface was to create higher level functionality for doing role-based authorization. Examples of that are the PrincipalPermissionAttribute, the good old web.config Authorization section…and the [Authorize] attribute.

Moving to Claims
In .NET 4.5 the .NET team did a radical change and injected a new base class into all existing principal implementations – ClaimsPrincipal. While claims were much more powerful than just roles, they needed to maintain backwards compatibility. In other words, what was supposed to happen if someone moved a pre-4.5 application to 4.5 and called IsInRole? Which claim will represent roles?

To make the behaviour configurable they introduced the RoleClaimType (and also NameClaimType) property on ClaimsIdentity. So practically speaking, when you call IsInRole, ClaimsPrincipal checks its identities for a claim of whatever type you set on RoleClaimType with the given value. As a default value they decided on re-using a WS-*/SOAP-era proprietary type they introduced with WIF (as part of the ClaimTypes class): http://schemas.microsoft.com/ws/2008/06/identity/claims/role.

So to summarize, if you call IsInRole, by default the assumption is that your claims representing roles have the type mentioned above – otherwise the role check will not succeed.

When you are staying within the Microsoft world and their guidance, you will probably always use the ClaimTypes class which has a Role member that maps to the above claim type. This will make role checks automagically work.

Fast forward to modern Applications and OpenID Connect
When you are working with external identity providers, the chance is quite low that they will use the Microsoft legacy claim types. Instead, they will use the more modern, standard OpenID Connect claim types.

In that case you need to be aware of the default behaviour of ClaimsPrincipal – and either set the NameClaimType and RoleClaimType to the right values manually – or transform the external claims types to Microsoft’s claim types.

The latter approach is what Microsoft implemented (of course) in their JWT validation library. The JWT handler tries to map all kinds of external claim types to the corresponding values on the ClaimTypes class – e.g. role to http://schemas.microsoft.com/ws/2008/06/identity/claims/role.

I personally don’t like that, because I think that claim types are an explicit contract in your application, and changing them should be part of application logic and claims transformation – and not a “smart” feature of token validation. That’s why you will always see the following line in my code:

JwtSecurityTokenHandler.InboundClaimTypeMap.Clear();

..which turns the mapping off. Newer versions of the handler call it DefaultInboundClaimTypeMap.

Setting the claim types manually
The constructor of ClaimsIdentity allows setting the claim types explicitly:

var id = new ClaimsIdentity(claims, "authenticationType", "name", "role");
var p = new ClaimsPrincipal(id);

Also the token validation parameters object used by the JWT library has that feature. It bubbles up to e.g. the OpenID Connect authentication middleware like this:

var oidcOptions = new OpenIdConnectOptions
{
    AuthenticationScheme = "oidc",
    SignInScheme = "cookies",
 
    Authority = Clients.Constants.BaseAddress,
    ClientId = "mvc.implicit",
    ResponseType = "id_token",
    SaveTokens = true,
 
    TokenValidationParameters = new TokenValidationParameters
    {
        NameClaimType = "name",
        RoleClaimType = "role",
    }
};

Other JWT related libraries have the same capabilities – just have a look around.

Summary
Role checks are legacy – they only exist in the (Microsoft) claims world because of backwards compatibility with IPrincipal. There's no need for them anymore – and you shouldn't do role checks. If you want to check for the existence of specific claims – simply query the claims collection for what you are looking for.
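
For example, a direct claims check in a controller might look like this (the claim type and value are purely illustrative):

// instead of [Authorize(Roles = "admin")] or User.IsInRole("admin")
if (User.HasClaim("role", "admin"))
{
    // the caller has the claim we are interested in
}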

If you need to bring old code that uses role checks forward, either let the JWT handler do some magic for you, or take control over the claim types yourself. You probably know by now what I would do ;)

 

…oh – and just in case you were looking for some practical advice here. The next time your [Authorize] attribute does not behave as expected – bring up the debugger, inspect your ClaimsPrincipal (e.g. Controller.User) and compare the RoleClaimType property with the claim type that holds your roles. If they are different – there’s your answer.

Screenshot 2016-08-21 14.20.28

 

 


Filed under: .NET Security, OAuth, OpenID Connect, WebAPI


Damien Bowden: ASP.NET Core logging with NLog and Elasticsearch

This article shows how to log to Elasticsearch using NLog in an ASP.NET Core application. NLog is a free, open-source logging platform for .NET.

Code: https://github.com/damienbod/AspNetCoreNlog

NLog posts in this series:

  1. ASP.NET Core logging with NLog and Microsoft SQL Server
  2. ASP.NET Core logging with NLog and Elasticsearch
  3. Setting the NLog database connection string in the ASP.NET Core appsettings.json

NLog.Extensions.Logging is required to use NLog in an ASP.NET Core application, and is added to the dependencies of the project. NLog.Targets.ElasticSearch is also added to the dependencies. This project is at present NOT the NuGet package from ReactiveMarkets, but the ReactiveMarkets source code updated to .NET Core. Thanks to ReactiveMarkets for this library; hopefully the NuGet package will be updated so that it can be used directly.

The NLog configuration file also needs to be added to the publishOptions in the project.json file.

"dependencies": {
	"Microsoft.NETCore.App": {
		"version": "1.0.0",
		"type": "platform"
	},
	"Microsoft.AspNetCore.Mvc": "1.0.0",
	"Microsoft.AspNetCore.Server.IISIntegration": "1.0.0",
	"Microsoft.AspNetCore.Diagnostics": "1.0.0",
	"Microsoft.AspNetCore.Server.Kestrel": "1.0.0",
	"Microsoft.Extensions.Configuration.EnvironmentVariables": "1.0.0",
	"Microsoft.Extensions.Configuration.FileExtensions": "1.0.0",
	"Microsoft.Extensions.Configuration.Json": "1.0.0",
	"Microsoft.Extensions.Logging": "1.0.0",
	"Microsoft.Extensions.Logging.Console": "1.0.0",
	"Microsoft.Extensions.Logging.Debug": "1.0.0",
	"Microsoft.Extensions.Options.ConfigurationExtensions": "1.0.0",
	"NLog.Extensions.Logging": "1.0.0-rtm-alpha4",
	"NLog.Targets.ElasticSearch": "1.0.0-*"
},

"publishOptions": {
    "include": [
        "wwwroot",
        "Views",
        "Areas/**/Views",
        "appsettings.json",
        "web.config",
        "nlog.config"
    ]
},

The NLog configuration is added to the Startup.cs class in the Configure method.

public void Configure(IApplicationBuilder app, IHostingEnvironment env, ILoggerFactory loggerFactory)
{
	loggerFactory.AddNLog();

	var configDir = "C:\\git\\damienbod\\AspNetCoreNlog\\Logs";

	if (configDir != string.Empty)
	{
		var logEventInfo = NLog.LogEventInfo.CreateNullEvent();


		foreach (FileTarget target in LogManager.Configuration.AllTargets.Where(t => t is FileTarget))
		{
			var filename = target.FileName.Render(logEventInfo).Replace("'", "");
			target.FileName = Path.Combine(configDir, filename);
		}

		LogManager.ReconfigExistingLoggers();
	}

	//env.ConfigureNLog("nlog.config");

	//loggerFactory.AddConsole(Configuration.GetSection("Logging"));
	//loggerFactory.AddDebug();

	app.UseMvc();
}

The nlog.config target and rules can be configured to log to Elasticsearch. NLog.Targets.ElasticSearch is an extension and needs to be added using the extensions tag.

<?xml version="1.0" encoding="utf-8" ?>
<nlog xmlns="http://www.nlog-project.org/schemas/NLog.xsd"
      xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
      autoReload="true"
      internalLogLevel="Warn"
      internalLogFile="C:\git\damienbod\AspNetCoreNlog\Logs\internal-nlog.txt">
    
    <extensions>
        <add assembly="NLog.Targets.ElasticSearch"/>
    </extensions>
            
  <targets>

    <target name="ElasticSearch" xsi:type="BufferingWrapper" flushTimeout="5000">
      <target xsi:type="ElasticSearch"/>
    </target>
   
  </targets>

  <rules>
    <logger name="*" minlevel="Trace" writeTo="ElasticSearch" />
      
  </rules>
</nlog>

The Elasticsearch URL for the NLog.Targets.ElasticSearch package can be configured using the ElasticsearchUrl property. This can be defined in the appsettings configuration file.

{
    "Logging": {
        "IncludeScopes": false,
        "LogLevel": {
            "Default": "Debug",
            "System": "Information",
            "Microsoft": "Information"
        }
    },
    "ElasticsearchUrl": "http://localhost:9200"
}

NLog.Targets.ElasticSearch (ReactiveMarkets)

The existing NLog.Targets.ElasticSearch project from ReactiveMarkets is updated to a NETStandard Library. This class library requires Elasticsearch.Net, NLog and Newtonsoft.Json. The dependencies are added to the project.json file. The library supports both netstandard1.6 and also net451.

{
  "version": "1.0.0-*",

    "dependencies": {
        "NETStandard.Library": "1.6.0",
        "NLog": "4.4.0-betaV15",
        "Newtonsoft.Json": "9.0.1",
        "Elasticsearch.Net": "2.4.3",
        "Microsoft.Extensions.Configuration": "1.0.0",
        "Microsoft.Extensions.Configuration.FileExtensions": "1.0.0",
        "Microsoft.Extensions.Configuration.Json": "1.0.0"
    },

    "frameworks": {
        "netstandard1.6": {
            "imports": "dnxcore50"
        },
        "net451": {
            "frameworkAssemblies": {
                "System.Runtime.Serialization": "",
                "System.Runtime": ""
            }
        }
    }
}

The StringExtensions class is extended to make it possible to define the Elasticsearch URL in a configuration file.
(original code from ReactiveMarkets)

using System;
using System.IO;
#if NET45
#else
using Microsoft.Extensions.Configuration;
#endif

namespace NLog.Targets.ElasticSearch
{
    internal static class StringExtensions
    {
        public static object ToSystemType(this string field, Type type)
        {
            switch (type.FullName)
            {
                case "System.Boolean":
                    return Convert.ToBoolean(field);
                case "System.Double":
                    return Convert.ToDouble(field);
                case "System.DateTime":
                    return Convert.ToDateTime(field);
                case "System.Int32":
                    return Convert.ToInt32(field);
                case "System.Int64":
                    return Convert.ToInt64(field);
                default:
                    return field;
            }
        }

        public static string GetConnectionString(this string name)
        {
            var value = GetEnvironmentVariable(name);
            if (!string.IsNullOrEmpty(value))
                return value;
#if NET45
            var connectionString = ConfigurationManager.ConnectionStrings[name];
            return connectionString?.ConnectionString;
#else
            IConfigurationRoot configuration;
            var builder = new Microsoft.Extensions.Configuration.ConfigurationBuilder()
                .SetBasePath(Directory.GetCurrentDirectory())
                .AddJsonFile("appsettings.json", optional: true, reloadOnChange: true);

            configuration = builder.Build();
            return configuration["ElasticsearchUrl"];
#endif

        }

        private static string GetEnvironmentVariable(this string name)
        {
            return string.IsNullOrEmpty(name) ? null : Environment.GetEnvironmentVariable(name);
        }
    }
}

When the application is started, the logs are written to Elasticsearch. These logs can be viewed in Elasticsearch at the following URL:

http://localhost:9200/logstash-‘date’/_search

{
	"took": 2,
	"timed_out": false,
	"_shards": {
		"total": 5,
		"successful": 5,
		"failed": 0
	},
	"hits": {
		"total": 18,
		"max_score": 1.0,
		"hits": [{
			"_index": "logstash-2016.08.19",
			"_type": "logevent",
			"_id": "AVaiJHPycDWw4BKmTWqP",
			"_score": 1.0,
			"_source": {
				"@timestamp": "2016-08-19T09:31:44.5790894Z",
				"level": "Debug",
				"message": "2016-08-19 11:31:44.5790|DEBUG|Microsoft.AspNetCore.Hosting.Internal.WebHost|Hosting starting"
			}
		},
		{
			"_index": "logstash-2016.08.19",
			"_type": "logevent",
			"_id": "AVaiJHPycDWw4BKmTWqU",
			"_score": 1.0,
			"_source": {
				"@timestamp": "2016-08-19T09:31:45.4788003Z",
				"level": "Info",
				"message": "2016-08-19 11:31:45.4788|INFO|Microsoft.AspNetCore.Hosting.Internal.WebHost|Request starting HTTP/1.1 DEBUG http://localhost:55423/  0"
			}
		},
		{
			"_index": "logstash-2016.08.19",
			"_type": "logevent",
			"_id": "AVaiJHPycDWw4BKmTWqW",
			"_score": 1.0,
			"_source": {
				"@timestamp": "2016-08-19T09:31:45.6248512Z",
				"level": "Debug",
				"message": "2016-08-19 11:31:45.6248|DEBUG|Microsoft.AspNetCore.Server.Kestrel|Connection id \"0HKU82EHFC0S9\" completed keep alive response."
			}
		},

Links

https://github.com/NLog/NLog.Extensions.Logging

https://github.com/ReactiveMarkets/NLog.Targets.ElasticSearch

https://github.com/NLog

https://docs.asp.net/en/latest/fundamentals/logging.html

https://msdn.microsoft.com/en-us/magazine/mt694089.aspx

https://github.com/nlog/NLog/wiki/Database-target

https://www.elastic.co/products/elasticsearch

https://github.com/elastic/logstash

https://github.com/elastic/elasticsearch-net

https://www.nuget.org/packages/Elasticsearch.Net/

https://github.com/nlog/NLog/wiki/File-target#size-based-file-archival

http://www.danesparza.net/2014/06/things-your-dad-never-told-you-about-nlog/



Andrew Lock: How to set the hosting environment in ASP.NET Core


When running ASP.NET Core apps, the WebHostBuilder will automatically attempt to determine which environment it is running in. By convention, this will be one of Development, Staging or Production but you can set it to any string value you like.

The IHostingEnvironment allows you to programmatically retrieve the current environment so you can have environment-specific behaviour. For example, you could enable bundling and minification of assets when in the Production environment, while serving files unchanged in the Development environment.
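
As a rough sketch of that kind of environment-specific branching in Startup.Configure (the error handling calls are just the usual examples, not anything specific to this post):

public void Configure(IApplicationBuilder app, IHostingEnvironment env)
{
    if (env.IsDevelopment())
    {
        // Show full error details while developing
        app.UseDeveloperExceptionPage();
    }
    else
    {
        // In Staging/Production, show a friendly error page instead
        app.UseExceptionHandler("/Home/Error");
    }

    app.UseStaticFiles();
    app.UseMvcWithDefaultRoute();
}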

In this post I'll show how to change the current hosting environment used by ASP.NET Core using environment variables on Windows and OS X, using Visual Studio and Visual Studio Code, or by using command line arguments.

Changing the hosting environment

ASP.NET Core uses the ASPNETCORE_ENVIRONMENT environment variable to determine the current environment. By default, if you run your application without setting this value, it will automatically default to the Production environment.

When you run your application using dotnet run, the console output lists the current hosting environment in the output:

> dotnet run
Project TestApp (.NETCoreApp,Version=v1.0) was previously compiled. Skipping compilation.

Hosting environment: Production  
Content root path: C:\Projects\TestApp  
Now listening on: http://localhost:5000  
Application started. Press Ctrl+C to shut down.  

There are a number of ways to set this environment variable; which method is best depends on how you are building and running your applications.

Setting the environment variable in Windows

The most obvious way to change the environment is to update the environment variable on your machine. This is useful if you know, for example, that applications run on that machine will always be in a given environment, whether that is Development, Staging or Production.

On Windows, there are a number of ways to change the environment variables, depending on what you are most comfortable with.

At the command line

You can easily set an environment variable from a command prompt using the setx.exe command, included in Windows since Vista. You can use it to set a user variable:

>setx ASPNETCORE_ENVIRONMENT "Development"

SUCCESS: Specified value was saved.

Note that the environment variable is not set in the current open window. You will need to open a new command prompt to see the updated environment. It is also possible to set system variables (rather than just user variables) if you open an administrative command prompt and add the /M switch:

>setx ASPNETCORE_ENVIRONMENT "Development" /M

SUCCESS: Specified value was saved.

Using PowerShell

Alternatively, you can use PowerShell to set the variable. In PowerShell, as well as the normal user and system variables, you can also create a temporary variable using the $Env: command:

$Env:ASPNETCORE_ENVIRONMENT = "Development"

The variable created lasts just for the duration of your PowerShell session - once you close the window the environment reverts back to its default value.

Alternatively, you could set the user or system environment variables directly. This method does not change the environment variables in the current session, so you will need to open a new PowerShell window to see your changes. As before, changing the system (Machine) variables will require administrative access:

[Environment]::SetEnvironmentVariable("ASPNETCORE_ENVIRONMENT", "Development", "User")
[Environment]::SetEnvironmentVariable("ASPNETCORE_ENVIRONMENT", "Development", "Machine")

Using the windows control panel

If you're not a fan of the command prompt, you can easily update your variables using your mouse! Click the Windows start menu button (or press the Windows key), search for environment variables, and choose Edit environment variables for your account:


Selecting this option will open the System Properties dialog:


Click Environment Variables to view the list of current environment variables on your system.


Assuming you do not already have a variable called ASPNETCORE_ENVIRONMENT, click the New... button and add a new account environment variable:


Click OK to save all your changes. You will need to re-open any command windows to ensure the new environment variables are loaded.

Setting the environment variables on OS X

You can set an environment variable on OS X by editing or creating the .bash_profile file in your favourite editor (I'm using nano):

$ nano ~/.bash_profile

You can then export the ASPNETCORE_ENVIRONMENT variable. The variable will not be set in the current session, but will be updated when you open a new terminal window:

export ASPNETCORE_ENVIRONMENT=development  

Important: the command must be written exactly as above - there must be no spaces on either side of the =. Also note that my bash knowledge is pretty poor, so if this approach doesn't work for you, I encourage you to go googling for one that does :)

Configuring the hosting environment using your IDE

Instead of updating the user or system environment variables, you can also configure the environment from your IDE, so that when you run or debug the application from there, it will use the correct environment.

Visual studio launchSettings.json

When you create an ASP.NET Core application using the Visual Studio templates, it automatically creates a launchSettings.json file. This file serves as the provider for the Debug targets when debugging with F5 in Visual Studio:


When running with one of these options, Visual Studio will set the environment variables specified. In the file below, you can see the ASPNETCORE_ENVIRONMENT variable is set to Development.

{
  "iisSettings": {
    "windowsAuthentication": false,
    "anonymousAuthentication": true,
    "iisExpress": {
      "applicationUrl": "http://localhost:53980/",
      "sslPort": 0
    }
  },
  "profiles": {
    "IIS Express": {
      "commandName": "IISExpress",
      "launchBrowser": true,
      "environmentVariables": {
        "ASPNETCORE_ENVIRONMENT": "Development"
      }
    },
    "TestApp": {
      "commandName": "Project",
      "launchBrowser": true,
      "launchUrl": "http://localhost:5000",
      "environmentVariables": {
        "ASPNETCORE_ENVIRONMENT": "Development"
      }
    }
  }
}

You can also edit this file using the project Properties window. Just double click the Properties node in your solution, and select the Debug tab:


Visual Studio Code launch.json

If you are using Visual Studio Code, there is a similar file, launch.json which is added when you first debug your application. This file contains a number of configurations one of which should be called ".NET Core Launch (web)". You can set additional environment variables when launching with this command by adding keys to the env property:

{
    "version": "0.2.0",
    "configurations": [
        {
            "name": ".NET Core Launch (web)",
            "type": "coreclr",
            "request": "launch",
            "preLaunchTask": "build",
            "program": "${workspaceRoot}/bin/Debug/netcoreapp1.0/TestApp.dll",
            "args": [],
            "cwd": "${workspaceRoot}",
            "stopAtEntry": false,
            "launchBrowser": {
                "enabled": true,
                "args": "${auto-detect-url}",
                "windows": {
                    "command": "cmd.exe",
                    "args": "/C start ${auto-detect-url}"
                },
                "osx": {
                    "command": "open"
                },
                "linux": {
                    "command": "xdg-open"
                }
            },
            "env": {
                "ASPNETCORE_ENVIRONMENT": "Development"
            },
            "sourceFileMap": {
                "/Views": "${workspaceRoot}/Views"
            }
        }
    ]
}

Setting hosting environment using command args

Depending on how you have configured your WebHostBuilder, you may also be able to specify the environment by providing a command line argument. To do so, you need to use a ConfigurationBuilder which uses the AddCommandLine() extension method from the Microsoft.Extensions.Configuration.CommandLine package. You can then pass your configuration to the WebHostBuilder using UseConfiguration(config):

var config = new ConfigurationBuilder()  
    .AddCommandLine(args)
    .Build();

var host = new WebHostBuilder()  
    .UseConfiguration(config)
    .UseContentRoot(Directory.GetCurrentDirectory())
    .UseKestrel()
    .UseIISIntegration()
    .UseStartup<Startup>()
    .Build();

This allows you to specify the hosting environment at run time using the --environment argument:

> dotnet run --environment "Staging"

Project TestApp (.NETCoreApp,Version=v1.0) was previously compiled. Skipping compilation.

Hosting environment: Staging  
Content root path: C:\Projects\Repos\Stormfront.Support\src\Stormfront.Support  
Now listening on: http://localhost:5000  
Application started. Press Ctrl+C to shut down.  

Summary

In this post I showed a number of ways you can specify which environment you are currently running in. Which method is best will depend on your setup and requirements. However you choose, if you change the environment variable you will need to restart the Kestrel server, as the environment is determined as part of the server start up.

Altering the hosting environment allows you to configure your application differently at run time, enabling debugging tools in a development setting or optimisations in a production environment. For details on using the IHostingEnvironment service, check out the documentation here.

One final point - environment variables are case insensitive, so you can use "Development", "development" or "DEVELOPMENT" to your heart's content.


Damien Bowden: ASP.NET Core logging with NLog and Microsoft SQL Server

This article shows how to setup logging in an ASP.NET Core application which logs to a Microsoft SQL Server using NLog.

Code: https://github.com/damienbod/AspNetCoreNlog

NLog posts in this series:

  1. ASP.NET Core logging with NLog and Microsoft SQL Server
  2. ASP.NET Core logging with NLog and Elasticsearch
  3. Setting the NLog database connection string in the ASP.NET Core appsettings.json

The NLog.Extensions.Logging package is required to add NLog to an ASP.NET Core application. This package, as well as System.Data.SqlClient, is added to the dependencies in the project.json file.

 "dependencies": {
        "Microsoft.NETCore.App": {
            "version": "1.0.0",
            "type": "platform"
        },
        "Microsoft.AspNetCore.Mvc": "1.0.0",
        "Microsoft.AspNetCore.Server.IISIntegration": "1.0.0",
        "Microsoft.AspNetCore.Diagnostics": "1.0.0",
        "Microsoft.AspNetCore.Server.Kestrel": "1.0.0",
        "Microsoft.Extensions.Configuration.EnvironmentVariables": "1.0.0",
        "Microsoft.Extensions.Configuration.FileExtensions": "1.0.0",
        "Microsoft.Extensions.Configuration.Json": "1.0.0",
        "Microsoft.Extensions.Logging": "1.0.0",
        "Microsoft.Extensions.Logging.Console": "1.0.0",
        "Microsoft.Extensions.Logging.Debug": "1.0.0",
        "Microsoft.Extensions.Options.ConfigurationExtensions": "1.0.0",
        "NLog.Extensions.Logging": "1.0.0-rtm-alpha4",
        "System.Data.SqlClient": "4.1.0"
  },

Now a nlog.config file is created and added to the project. This file contains the configuration for NLog. In the file, the targets for the logs are defined as well as the rules. An internal log file is also defined, so that if something is wrong with the logging configuration, you can find out why.

<?xml version="1.0" encoding="utf-8" ?>
<nlog xmlns="http://www.nlog-project.org/schemas/NLog.xsd"
      xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
      autoReload="true"
      internalLogLevel="Warn"
      internalLogFile="C:\git\damienbod\AspNetCoreNlog\Logs\internal-nlog.txt">
    
  <targets>
    <target xsi:type="File" name="allfile" fileName="nlog-all.log"
                layout="${longdate}|${event-properties:item=EventId.Id}|${logger}|${uppercase:${level}}|${message} ${exception}" />

    <target xsi:type="File" name="ownFile-web" fileName="nlog-own.log"
             layout="${longdate}|${event-properties:item=EventId.Id}|${logger}|${uppercase:${level}}|  ${message} ${exception}" />

    <target xsi:type="Null" name="blackhole" />

    <target name="database" xsi:type="Database" >

    <connectionString>
        Data Source=N275\MSSQLSERVER2014;Initial Catalog=Nlogs;Integrated Security=True;
    </connectionString>
<!--
  Remarks:
    The appsetting layouts require the NLog.Extended assembly.
    The aspnet-* layouts require the NLog.Web assembly.
    The Application value is determined by an AppName appSetting in Web.config.
    The "NLogDb" connection string determines the database that NLog write to.
    The create dbo.Log script in the comment below must be manually executed.

  Script for creating the dbo.Log table.

  SET ANSI_NULLS ON
  SET QUOTED_IDENTIFIER ON
  CREATE TABLE [dbo].[Log] (
      [Id] [int] IDENTITY(1,1) NOT NULL,
      [Application] [nvarchar](50) NOT NULL,
      [Logged] [datetime] NOT NULL,
      [Level] [nvarchar](50) NOT NULL,
      [Message] [nvarchar](max) NOT NULL,
      [Logger] [nvarchar](250) NULL,
      [Callsite] [nvarchar](max) NULL,
      [Exception] [nvarchar](max) NULL,
    CONSTRAINT [PK_dbo.Log] PRIMARY KEY CLUSTERED ([Id] ASC)
      WITH (PAD_INDEX  = OFF, STATISTICS_NORECOMPUTE  = OFF, IGNORE_DUP_KEY = OFF, ALLOW_ROW_LOCKS  = ON, ALLOW_PAGE_LOCKS  = ON) ON [PRIMARY]
  ) ON [PRIMARY]
-->

          <commandText>
              insert into dbo.Log (
              Application, Logged, Level, Message,
              Logger, CallSite, Exception
              ) values (
              @Application, @Logged, @Level, @Message,
              @Logger, @Callsite, @Exception
              );
          </commandText>

          <parameter name="@application" layout="AspNetCoreNlog" />
          <parameter name="@logged" layout="${date}" />
          <parameter name="@level" layout="${level}" />
          <parameter name="@message" layout="${message}" />

          <parameter name="@logger" layout="${logger}" />
          <parameter name="@callSite" layout="${callsite:filename=true}" />
          <parameter name="@exception" layout="${exception:tostring}" />
      </target>
      
  </targets>

  <rules>
    <!--All logs, including from Microsoft-->
    <logger name="*" minlevel="Trace" writeTo="allfile" />

    <logger name="*" minlevel="Trace" writeTo="database" />
      
    <!--Skip Microsoft logs and so log only own logs-->
    <logger name="Microsoft.*" minlevel="Trace" writeTo="blackhole" final="true" />
    <logger name="*" minlevel="Trace" writeTo="ownFile-web" />
  </rules>
</nlog>

The nlog.config also needs to be added to the publishOptions in the project.json file.

 "publishOptions": {
    "include": [
        "wwwroot",
        "Views",
        "Areas/**/Views",
        "appsettings.json",
        "web.config",
        "nlog.config"
    ]
  },

Now the database can be set up. You can create a new database, or use an existing one and add the dbo.Log table to it using the script below.

  SET ANSI_NULLS ON
  SET QUOTED_IDENTIFIER ON
  CREATE TABLE [dbo].[Log] (
      [Id] [int] IDENTITY(1,1) NOT NULL,
      [Application] [nvarchar](50) NOT NULL,
      [Logged] [datetime] NOT NULL,
      [Level] [nvarchar](50) NOT NULL,
      [Message] [nvarchar](max) NOT NULL,
      [Logger] [nvarchar](250) NULL,
      [Callsite] [nvarchar](max) NULL,
      [Exception] [nvarchar](max) NULL,
    CONSTRAINT [PK_dbo.Log] PRIMARY KEY CLUSTERED ([Id] ASC)
      WITH (PAD_INDEX  = OFF, STATISTICS_NORECOMPUTE  = OFF, IGNORE_DUP_KEY = OFF, ALLOW_ROW_LOCKS  = ON, ALLOW_PAGE_LOCKS  = ON) ON [PRIMARY]
  ) ON [PRIMARY]

The table in the database must match the configuration defined in the nlog.config file. The database target defines the connection string, the command used to add a log and also the parameters required.

You can change this as required. As yet, most of the NLog parameters do not work with ASP.NET Core, but this will certainly change as it is in early development. The NLog.Web NuGet package, when completed, will contain the ASP.NET Core parameters.

Now NLog can be added to the application in the Startup class, in the Configure method. The AddNLog extension method is used and the logging directory can be defined.

public void Configure(IApplicationBuilder app, IHostingEnvironment env, ILoggerFactory loggerFactory)
{
    loggerFactory.AddNLog();

    var configDir = "C:\\git\\damienbod\\AspNetCoreNlog\\Logs";

    if (configDir != string.Empty)
    {
        var logEventInfo = NLog.LogEventInfo.CreateNullEvent();


        foreach (FileTarget target in LogManager.Configuration.AllTargets.Where(t => t is FileTarget))
        {
            var filename = target.FileName.Render(logEventInfo).Replace("'", "");
            target.FileName = Path.Combine(configDir, filename);
        }

        LogManager.ReconfigExistingLoggers();
    }

    //env.ConfigureNLog("nlog.config");

    //loggerFactory.AddConsole(Configuration.GetSection("Logging"));
    //loggerFactory.AddDebug();

    app.UseMvc();
}

Now the logging can be used, using the default logging framework from ASP.NET Core.

An example of an ActionFilter

using Microsoft.AspNetCore.Mvc.Filters;
using Microsoft.Extensions.Logging;

namespace AspNetCoreNlog
{
    public class LogFilter : ActionFilterAttribute
    {
        private readonly ILogger _logger;

        public LogFilter(ILoggerFactory loggerFactory)
        {
            _logger = loggerFactory.CreateLogger("LogFilter");
        }

        public override void OnActionExecuting(ActionExecutingContext context)
        {
            _logger.LogInformation("OnActionExecuting");
            base.OnActionExecuting(context);
        }

        public override void OnActionExecuted(ActionExecutedContext context)
        {
            _logger.LogInformation("OnActionExecuted");
            base.OnActionExecuted(context);
        }

        public override void OnResultExecuting(ResultExecutingContext context)
        {
            _logger.LogInformation("OnResultExecuting");
            base.OnResultExecuting(context);
        }

        public override void OnResultExecuted(ResultExecutedContext context)
        {
            _logger.LogInformation("OnResultExecuted");
            base.OnResultExecuted(context);
        }
    }
}

The action filter is added in the Startup class, in the ConfigureServices method.

public void ConfigureServices(IServiceCollection services)
{

    // Add framework services.
    services.AddMvc();

    services.AddScoped<LogFilter>();
}

And some logging can be added to an MVC controller.

using System;
using System.Collections.Generic;
using Microsoft.AspNetCore.Mvc;
using Microsoft.Extensions.Logging;

namespace AspNetCoreNlog.Controllers
{

    [ServiceFilter(typeof(LogFilter))]
    [Route("api/[controller]")]
    public class ValuesController : Controller
    {
        private  ILogger<ValuesController> _logger;

        public ValuesController(ILogger<ValuesController> logger)
        {
            _logger = logger;
        }

        [HttpGet]
        public IEnumerable<string> Get()
        {
            _logger.LogCritical("nlog is working from a controller");
            // throw deliberately so that the exception also ends up in the logs
            throw new ArgumentException("way wrong");
            return new string[] { "value1", "value2" };
        }
    }
}

When the application is started, the logs are written to a local file in the Logs folder and also to the database.

sqlaspnetdatabselogger_01

Notes

NLog for ASP.NET Core is in early development, and the documentation is for .NET and not for .NET Core, so a lot of parameters, layouts, targets, etc. do not work. This project is open source, so you can extend it and contribute to it if you want.

Links

https://github.com/NLog/NLog.Extensions.Logging

https://github.com/NLog

https://docs.asp.net/en/latest/fundamentals/logging.html

https://msdn.microsoft.com/en-us/magazine/mt694089.aspx

https://github.com/nlog/NLog/wiki/Database-target



Dominick Baier: Trying IdentityServer

We have a demo instance of IdentityServer3 on https://demo.identityserver.io.

I already used this for various samples (e.g. the OpenID Connect native clients) – and it makes it easy to try IdentityServer with your clients without having to deploy and configure anything yourself.

The Auth0 guys just released a nice OpenID Connect playground website that allows you to interact with arbitrary spec compliant providers. If you want to try it yourself with IdentityServer – click on the configuration link and use these settings:

Screenshot 2016-08-17 10.09.34

In essence you only need to provide the URL of the discovery document, the client ID and the secret. The rest gets configured automatically for you.

Pressing Start will bring you to our standard login page:

Screenshot 2016-08-17 11.22.56

You can either use bob / bob (or alice / alice) to log in – or use your Google account.

Logging in will bring you to the consent screen – and then back to the playground:

Screenshot 2016-08-17 11.24.24

Now you can exercise the code to token exchange as well as the validation. As a last step you can even jump directly to jwt.io for inspecting the identity token:

Screenshot 2016-08-17 11.27.05

The source code for the IdentityServer demo web site can be found here.

We also have more client types preconfigured, e.g. OpenID Connect hybrid flow, implicit flow, as well as clients using PKCE. You can see the full list here.

You can request the typical OpenID Connect scopes – as well as a scope called api. The resulting access token can then be used to call https://demo.identityserver.io/api/identity which in turn will echo back the token claims as a JSON document.

Screenshot 2016-08-17 11.45.50

Have fun!

 


Filed under: ASP.NET, IdentityServer, OpenID Connect, OWIN, Uncategorized, WebAPI


Dominick Baier: Commercial Support Options for IdentityServer

Many customers have asked us for production support for IdentityServer. While this is something we would love to provide, Brock and I can’t do that on our own because we can’t guarantee the response times.

I am happy to announce that we have now partnered with our good friends at Rock Solid Knowledge to provide commercial support for IdentityServer!

RSK has excellent people with deep IdentityServer knowledge and Brock and I will help out as 2nd level support if needed.

Head over to https://www.identityserver.com/ and get in touch with them!


Filed under: ASP.NET, IdentityServer, OAuth, OpenID Connect, WebAPI


Andrew Lock: Access services inside ConfigureServices using IConfigureOptions in ASP.NET Core


In a recent post I showed how you could populate an IOptions<T> object from the database for the purposes of caching the query result. It wasn't the most flexible or recommended solution, but it illustrated the point.

However one of the issues I had with the solution was the need to access configured services from within the IOptions<T> configuration lambda, inside ConfigureServices itself.

The solution I came up with was to use the injected IServiceCollection to build an IServiceProvider to get the configured service I needed. As I pointed out at the time, this service-locator pattern felt icky and wrong, but I couldn't see any other way of doing it.

Thankfully, and inspired by this post from Ben Collins, there is a much better solution to be had by utilising the IConfigureOptions<T> interface.

The previous version

In my post, I had this (abbreviated) code, which was trying to access an Entity Framework Core DbContext in the Configure method to set up the MultitenancyOptions class:

public class Startup  
{
    public Startup(IHostingEnvironment env) { /* ... build configuration */ }

    public IConfigurationRoot Configuration { get; }

    public void ConfigureServices(IServiceCollection services)
    {
        // add MVC, connection string etc

        services.Configure<MultitenancyOptions>(  
            options =>
            {
                var scopeFactory = services
                    .BuildServiceProvider()
                    .GetRequiredService<IServiceScopeFactory>();

                using (var scope = scopeFactory.CreateScope())
                {
                    var provider = scope.ServiceProvider;
                    using (var dbContext = provider.GetRequiredService<ApplicationDbContext>())
                    {
                        options.AppTenants = dbContext.AppTenants.ToList();
                    }
                }
            });

        // add other services
    }

    public void Configure(IApplicationBuilder app) { /* ... configure pipeline */ }
}

Yuk. As you can see, the call to Configure is a mess. In order to obtain a scoped lifetime DbContext it has to build the service collection to produce an IServiceProvider, to then obtain an IServiceScopeFactory. From there it can create the correct scoping, create another IServiceProvider, and finally find the DbContext we actually need. This lambda has way too much going on, and 90% of it is plumbing.

If you're wondering why you shouldn't just fetch a DbContext directly from the first service provider, check out this twitter discussion between Julie Lerman, David Fowler and Shawn Wildermuth.

The new improved answer

So, now we know what we're working with, how do we improve it? Luckily, the ASP.NET team anticipated this issue - instead of providing a lambda for configuring the MultitenancyOptions object, we implement the IConfigureOptions<TOptions> interface, where TOptions: MultitenancyOptions. This interface has a single method, Configure, which is passed a constructed MultitenancyOptions object for you to update:

public class ConfigureMultitenancyOptions : IConfigureOptions<MultitenancyOptions>  
{
    private readonly IServiceScopeFactory _serviceScopeFactory;
    public ConfigureMultitenancyOptions(IServiceScopeFactory serviceScopeFactory)
    {
        _serviceScopeFactory = serviceScopeFactory;
    }

    public void Configure(MultitenancyOptions options)
    {
        using (var scope = _serviceScopeFactory.CreateScope())
        {
            var provider = scope.ServiceProvider;
            using (var dbContext = provider.GetRequiredService<ApplicationDbContext>())
            {
                options.AppTenants = dbContext.AppTenants.ToList();
            }
        }
    }
}

We then just need to register our configuration class in the normal ConfigureServices method, which becomes:

public void ConfigureServices(IServiceCollection services)  
{
    // add MVC, connection string etc

    services.AddSingleton<IConfigureOptions<MultitenancyOptions>, ConfigureMultitenancyOptions>();

    // add other services
}

The advantage of this approach is that the configuration class is created through the usual DI container, so can have dependencies injected simply through the constructor. There is still a slight complexity introduced by the fact we want MultitenancyOptions to have a singleton lifecycle. To prevent leaking a lifetime scope, we must inject an IServiceScopeFactory and create an explicit scope before retrieving our DbContext. Again, check out Julie Lerman's twitter conversation and associated post for more details on this.

The most important point here is that we are no longer calling BuildServiceProvider() in our Configure method, just to get a service we need. So just try and forget that I ever mentioned doing that ;)

Under the hood

In hindsight, I really should have guessed that this approach was possible, as the lambda approach is really just a specialised version of the IConfigureOptions approach.

Taking a look at the Options source code really shows how these two methods tie together. The Configure extension method on IServiceCollection that takes a lambda looks like the following (with precondition checks etc removed)

public static IServiceCollection Configure<TOptions>(  
    this IServiceCollection services, Action<TOptions> configureOptions)
{
    services.AddSingleton<IConfigureOptions<TOptions>>(new ConfigureOptions<TOptions>(configureOptions));
    return services;
}

All this method is doing is creating an instance of the ConfigureOptions<TOptions> class, passing in the configuration lambda, and registering that as a singleton. That looks suspiciously like our tidied up approach, the difference being that we left the instantiation of our ConfigureMultitenancyOptions to the DI system, instead of new-ing it up directly.

As is to be expected, ConfigureOptions<TOptions>, which implements IConfigureOptions<TOptions>, just calls the provided lambda in its Configure method:

public class ConfigureOptions<TOptions> : IConfigureOptions<TOptions> where TOptions : class  
{
    public ConfigureOptions(Action<TOptions> action)
    {
        Action = action;
    }

    public Action<TOptions> Action { get; }

    public virtual void Configure(TOptions options)
    {
        Action.Invoke(options);
    }
}

So again, the only substantive difference between using the lambda approach and the IConfigureOptions approach is that the latter allows you to inject services into your options class to be used during configuration.

One final useful point to be aware of: you can register multiple instances of IConfigureOptions<TOptions> for the same TOptions. They will all be applied, in the order they were added to the service collection in ConfigureServices. That allows you to do simple configuration in ConfigureServices using the Configure lambda, while using a separate implementation of IConfigureOptions elsewhere, if you're so inclined.
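
For example, the following sketch registers an inline lambda alongside the ConfigureMultitenancyOptions class from above (the empty default list is purely illustrative, and AppTenant is assumed to be the entity type behind the AppTenants DbSet). Both are applied to the same MultitenancyOptions instance, in this order:

public void ConfigureServices(IServiceCollection services)
{
    // Applied first: simple inline configuration
    services.Configure<MultitenancyOptions>(options =>
    {
        options.AppTenants = new List<AppTenant>();
    });

    // Applied second: the database-backed IConfigureOptions implementation shown earlier
    services.AddSingleton<IConfigureOptions<MultitenancyOptions>, ConfigureMultitenancyOptions>();
}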


Dominick Baier: Fixing OAuth 2.0 with OpenID Connect?

I didn’t like Nat’s Fixing OAuth? post.

“For protecting a resource with low value, current RFC6749 and RFC6750 with an appropriate constraint should be good enough…For protecting a resource whose value is higher than a certain level, e.g., the write access to the Financial API, then it would be more appropriate to use a modified protocol.”

I agree that write access to a financial API is a high-value operation (and security measures will go far beyond authentication and token requests) – but most users and implementers of OAuth 2.0 based systems would surely disagree that their resources only have a low value.

Then on the other hand I agree that OAuth 2.0 (or rather RFC6749 and 6750) on its own indeed has its issues and I would advise against using it (important part “on its own”).

Instead I would recommend using OpenID Connect – all of the OAuth 2.0 problems regarding client to provider communication are already fixed in OIDC – metadata, signed protocol responses, sender authentication, nonces etc.

When we designed IdentityServer, we always saw OpenID Connect as a “super-set” of OAuth 2.0 and always recommended against using OAuth without the OIDC parts. Some people didn’t like that – but applying sound constraints definitely helped security.

I really don’t understand why this is not the official messaging? Maybe it’s political?

[Screenshot (2016-07-29) – no response]

With regard to the issues around bearer tokens – well – I really, really don’t understand why proof of possession and HTTP signing take that long and seem to be such a low priority. We successfully implemented PoP tokens in IdentityServer and customers are using it. Of course there are issues – there will always be issues. But sitting on a half-done spec for years will definitely not solve them.

So my verdict is – for interactive applications, don’t use OAuth 2.0 on its own. Just use OpenID Connect and identity tokens in addition to access tokens – you don’t need to be a financial API to have proper security.
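
To make that concrete, the difference shows up already in the authorize request: an OpenID Connect request asks for an identity token and an access token together instead of an access token alone. A rough sketch of building such a request (client id, scopes and redirect URI are made-up values, not a working registration):

// Implicit flow request for an identity token *and* an access token - illustrative values only
var authorizeUrl = "https://demo.identityserver.io/connect/authorize" +
    "?client_id=" + Uri.EscapeDataString("my.client") +
    "&redirect_uri=" + Uri.EscapeDataString("https://myapp.example.com/callback") +
    "&response_type=" + Uri.EscapeDataString("id_token token") +
    "&scope=" + Uri.EscapeDataString("openid profile api1") +
    "&state=" + Guid.NewGuid().ToString("N") +
    "&nonce=" + Guid.NewGuid().ToString("N"); // the nonce ties the identity token to this request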

 


Filed under: IdentityServer, OAuth, OpenID Connect, WebAPI


Damien Bowden: Import, Export ASP.NET Core localized data as CSV

This article shows how localized data can be imported and exported using Localization.SqlLocalizer. The data is exported as CSV using the Formatter defined in the WebApiContrib.Core.Formatter.Csv package. The data can be imported using a file upload.

This makes it possible to export the application's localized data to a CSV format. A translation company can then translate the data, and it can be imported back into the application.

Code: https://github.com/damienbod/AspNet5Localization

The two required packages are added to the project.json file. The Localization.SqlLocalizer package is used for the ASP.NET Core localization. The WebApiContrib.Core.Formatter.Csv package defines the CSV InputFormatter and OutputFormatter.

"Localization.SqlLocalizer": "1.0.3",
"WebApiContrib.Core.Formatter.Csv": "1.0.0"

The packages are then configured in the Startup class. The DbContext LocalizationModelContext is added, as well as the ASP.NET Core request localization. The InputFormatter and the OutputFormatter are also added to the MVC services.

using System;
using System.Collections.Generic;
using System.Globalization;
using Localization.SqlLocalizer.DbStringLocalizer;
using Microsoft.AspNetCore.Builder;
using Microsoft.AspNetCore.Hosting;
using Microsoft.AspNetCore.Localization;
using Microsoft.EntityFrameworkCore;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using Microsoft.Net.Http.Headers;
using WebApiContrib.Core.Formatter.Csv;

namespace ImportExportLocalization
{
    public class Startup
    {
        public Startup(IHostingEnvironment env)
        {
            var builder = new ConfigurationBuilder()
                .SetBasePath(env.ContentRootPath)
                .AddJsonFile("appsettings.json", optional: true, reloadOnChange: true)
                .AddJsonFile($"appsettings.{env.EnvironmentName}.json", optional: true)
                .AddEnvironmentVariables();
            Configuration = builder.Build();
        }

        public IConfigurationRoot Configuration { get; }

        // This method gets called by the runtime. Use this method to add services to the container.
        public void ConfigureServices(IServiceCollection services)
        {
            var sqlConnectionString = Configuration["DbStringLocalizer:ConnectionString"];

            services.AddDbContext<LocalizationModelContext>(options =>
                options.UseSqlite(
                    sqlConnectionString,
                    b => b.MigrationsAssembly("ImportExportLocalization")
                )
            );

            // Requires that LocalizationModelContext is defined
            services.AddSqlLocalization(options => options.UseTypeFullNames = true);

            services.AddMvc()
                .AddViewLocalization()
                .AddDataAnnotationsLocalization();

            services.Configure<RequestLocalizationOptions>(
                options =>
                {
                    var supportedCultures = new List<CultureInfo>
                        {
                            new CultureInfo("en-US"),
                            new CultureInfo("de-CH"),
                            new CultureInfo("fr-CH"),
                            new CultureInfo("it-CH")
                        };

                    options.DefaultRequestCulture = new RequestCulture(culture: "en-US", uiCulture: "en-US");
                    options.SupportedCultures = supportedCultures;
                    options.SupportedUICultures = supportedCultures;
                });

            var csvFormatterOptions = new CsvFormatterOptions();

            services.AddMvc(options =>
            {
                options.InputFormatters.Add(new CsvInputFormatter(csvFormatterOptions));
                options.OutputFormatters.Add(new CsvOutputFormatter(csvFormatterOptions));
                options.FormatterMappings.SetMediaTypeMappingForFormat("csv", MediaTypeHeaderValue.Parse("text/csv"));
            });

            services.AddScoped<ValidateMimeMultipartContentFilter>();
        }

        // This method gets called by the runtime. Use this method to configure the HTTP request pipeline.
        public void Configure(IApplicationBuilder app, IHostingEnvironment env, ILoggerFactory loggerFactory)
        {
            loggerFactory.AddConsole(Configuration.GetSection("Logging"));
            loggerFactory.AddDebug();

            var locOptions = app.ApplicationServices.GetService<IOptions<RequestLocalizationOptions>>();
            app.UseRequestLocalization(locOptions.Value);

            if (env.IsDevelopment())
            {
                app.UseDeveloperExceptionPage();
                app.UseBrowserLink();
            }
            else
            {
                app.UseExceptionHandler("/Home/Error");
            }

            app.UseStaticFiles();

            app.UseMvc(routes =>
            {
                routes.MapRoute(
                    name: "default",
                    template: "{controller=Home}/{action=Index}/{id?}");
            });
        }
    }
}

The ImportExportController makes it possible to download all the localized data as a CSV file. This is implemented in the GetDataAsCsv method. The file can then be sent to a translation company. When the updated file is returned, it can be imported using the ImportCsvFileForExistingData method, which accepts the file and updates the data in the database. It is also possible to add new CSV data, but care has to be taken, as the keys have to match the configuration of the Localization.SqlLocalizer middleware.

using System;
using System.Collections.Generic;
using System.IO;
using System.Reflection;
using System.Threading.Tasks;
using Localization.SqlLocalizer.DbStringLocalizer;
using Microsoft.AspNetCore.Mvc;
using Microsoft.Net.Http.Headers;
using Newtonsoft.Json;

namespace ImportExportLocalization.Controllers
{
    [Route("api/ImportExport")]
    public class ImportExportController : Controller
    {
        private IStringExtendedLocalizerFactory _stringExtendedLocalizerFactory;

        public ImportExportController(IStringExtendedLocalizerFactory stringExtendedLocalizerFactory)
        {
            _stringExtendedLocalizerFactory = stringExtendedLocalizerFactory;
        }

        // http://localhost:6062/api/ImportExport/localizedData.csv
        [HttpGet]
        [Route("localizedData.csv")]
        [Produces("text/csv")]
        public IActionResult GetDataAsCsv()
        {
            return Ok(_stringExtendedLocalizerFactory.GetLocalizationData());
        }

        [Route("update")]
        [HttpPost]
        [ServiceFilter(typeof(ValidateMimeMultipartContentFilter))]
        public IActionResult ImportCsvFileForExistingData(CsvImportDescription csvImportDescription)
        {
            // TODO validate that data is a csv file.
            var contentTypes = new List<string>();

            if (ModelState.IsValid)
            {
                foreach (var file in csvImportDescription.File)
                {
                    if (file.Length > 0)
                    {
                        var fileName = ContentDispositionHeaderValue.Parse(file.ContentDisposition).FileName.Trim('"');
                        contentTypes.Add(file.ContentType);

                        var inputStream = file.OpenReadStream();
                        var items = readStream(inputStream);
                        _stringExtendedLocalizerFactory.UpdatetLocalizationData(items, csvImportDescription.Information);
                    }
                }
            }

            return RedirectToAction("Index", "Home");
        }

        [Route("new")]
        [HttpPost]
        [ServiceFilter(typeof(ValidateMimeMultipartContentFilter))]
        public IActionResult ImportCsvFileForNewData(CsvImportDescription csvImportDescription)
        {
            // TODO validate that data is a csv file.
            var contentTypes = new List<string>();

            if (ModelState.IsValid)
            {
                foreach (var file in csvImportDescription.File)
                {
                    if (file.Length > 0)
                    {
                        var fileName = ContentDispositionHeaderValue.Parse(file.ContentDisposition).FileName.Trim('"');
                        contentTypes.Add(file.ContentType);

                        var inputStream = file.OpenReadStream();
                        var items = readStream(inputStream);
                        _stringExtendedLocalizerFactory.AddNewLocalizationData(items, csvImportDescription.Information);
                    }
                }
            }

            return RedirectToAction("Index", "Home");
        }

        private List<LocalizationRecord> readStream(Stream stream)
        {
            bool skipFirstLine = true;
            string csvDelimiter = ";";

            List<LocalizationRecord> list = new List<LocalizationRecord>();
            var reader = new StreamReader(stream);


            while (!reader.EndOfStream)
            {
                var line = reader.ReadLine();
                var values = line.Split(csvDelimiter.ToCharArray());
                if (skipFirstLine)
                {
                    skipFirstLine = false;
                }
                else
                {
                    var itemTypeInGeneric = list.GetType().GetTypeInfo().GenericTypeArguments[0];
                    var item = new LocalizationRecord();
                    var properties = item.GetType().GetProperties();
                    for (int i = 0; i < values.Length; i++)
                    {
                        properties[i].SetValue(item, Convert.ChangeType(values[i], properties[i].PropertyType), null);
                    }

                    list.Add(item);
                }

            }

            return list;
        }
    }
}

The Index Razor view has a download link and two upload buttons to manage the localization data.


<fieldset>
    <legend style="padding-top: 10px; padding-bottom: 10px;">Download existing translations</legend>

    <a href="http://localhost:6062/api/ImportExport/localizedData.csv" target="_blank">localizedData.csv</a>

</fieldset>

<hr />

<div>
    <form enctype="multipart/form-data" method="post" action="http://localhost:6062/api/ImportExport/update" id="ajaxUploadForm" novalidate="novalidate">
        <fieldset>
            <legend style="padding-top: 10px; padding-bottom: 10px;">Upload existing CSV data</legend>

            <div class="col-xs-12" style="padding: 10px;">
                <div class="col-xs-4">
                    <label>Upload Information</label>
                </div>
                <div class="col-xs-7">
                    <textarea rows="2" placeholder="Information" class="form-control" name="information" id="information"></textarea>
                </div>
            </div>

            <div class="col-xs-12" style="padding: 10px;">
                <div class="col-xs-4">
                    <label>Upload CSV data</label>
                </div>
                <div class="col-xs-7">
                    <input type="file" name="file" id="fileInput">
                </div>
            </div>

            <div class="col-xs-12" style="padding: 10px;">
                <div class="col-xs-4">
                    <input type="submit" value="Upload Updated Data" id="ajaxUploadButton" class="btn">
                </div>
                <div class="col-xs-7">

                </div>
            </div>

        </fieldset>
    </form>
</div>

<div>
    <form enctype="multipart/form-data" method="post" action="http://localhost:6062/api/ImportExport/new" id="ajaxUploadForm" novalidate="novalidate">
        <fieldset>
            <legend style="padding-top: 10px; padding-bottom: 10px;">Upload new CSV data</legend>

            <div class="col-xs-12" style="padding: 10px;">
                <div class="col-xs-4">
                    <label>Upload Information</label>
                </div>
                <div class="col-xs-7">
                    <textarea rows="2" placeholder="Information" class="form-control" name="information" id="information"></textarea>
                </div>
            </div>

            <div class="col-xs-12" style="padding: 10px;">
                <div class="col-xs-4">
                    <label>Upload CSV data</label>
                </div>
                <div class="col-xs-7">
                    <input type="file" name="file" id="fileInput">
                </div>
            </div>

            <div class="col-xs-12" style="padding: 10px;">
                <div class="col-xs-4">
                    <input type="submit" value="Upload New Data" id="ajaxUploadButton" class="btn">
                </div>
                <div class="col-xs-7">

                </div>
            </div>

        </fieldset>
    </form>
</div>

The data can then be managed as required.

[Screenshot: localized data CSV import/export view]

The IStringExtendedLocalizerFactory interface offers all the import and export functionality supported by Localization.SqlLocalizer. If anything else is required, please create an issue or use the source code and extend it yourself.

public interface IStringExtendedLocalizerFactory : IStringLocalizerFactory
{
	void ResetCache();

	void ResetCache(Type resourceSource);

	IList GetImportHistory();

	IList GetExportHistory();

	IList GetLocalizationData(string reason = "export");

	IList GetLocalizationData(DateTime from, string culture = null, string reason = "export");

	void UpdatetLocalizationData(List<LocalizationRecord> data, string information);

	void AddNewLocalizationData(List<LocalizationRecord> data, string information);
}
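
As a quick illustration, the factory can be injected like any other service. The following admin controller is a minimal sketch made up for this post (it is not part of the sample project):

using Localization.SqlLocalizer.DbStringLocalizer;
using Microsoft.AspNetCore.Mvc;

[Route("api/LocalizationAdmin")]
public class LocalizationAdminController : Controller
{
    private readonly IStringExtendedLocalizerFactory _factory;

    public LocalizationAdminController(IStringExtendedLocalizerFactory factory)
    {
        _factory = factory;
    }

    // Returns the history of all imports, serialized by the configured formatters
    [HttpGet("importHistory")]
    public IActionResult GetImportHistory()
    {
        return Ok(_factory.GetImportHistory());
    }

    // Clears the localization cache, e.g. after changing records directly in the database
    [HttpPost("resetCache")]
    public IActionResult ResetCache()
    {
        _factory.ResetCache();
        return Ok();
    }
}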

Links:

https://www.nuget.org/packages/Localization.SqlLocalizer/

https://www.nuget.org/packages/WebApiContrib.Core.Formatter.Csv/



Dominick Baier: .NET Core 1.0 is released, but where is IdentityServer?

In short: we are working on it.

Migrating the code from Katana to ASP.NET Core was actually mostly mechanical. But obviously new approaches and patterns have been introduced which might, or might not, align directly with how we used to do things in IdentityServer3.

We also wanted to take the time to do some re-work and re-thinking, as well as doing some breaking changes that we couldn’t easily do before.

For a roadmap – in essence we will release a beta including the new UI interaction next week. Then we will have an RC by August and an RTM before the final ASP.NET/.NET Core tooling ships later this year.

Meanwhile we encourage you to try the current bits and give us feedback. The more the better.

Stay tuned.


Filed under: ASP.NET, IdentityServer, OAuth, OpenID Connect, WebAPI


Dominick Baier: Update for authentication & API access for native applications and IdentityModel.OidcClient

The most relevant spec for authentication and API access for native apps has been recently updated.

If you are “that kind of person” that enjoys looking at diffs of pre-release RFCs – you would have spotted a new way of dealing with the system browser for desktop operating systems (e.g. Windows or MacOS).

Quoting section 7.3:

“More applicable to desktop operating systems, some environments allow apps to create a local HTTP listener on a random port, and receive URI redirects that way.  This is an acceptable redirect URI choice for native apps on compatible platforms.”

IOW – your application launches a local “web server”, starts the system browser with a local redirect URI and waits for the response to come back (either a code or an error). This is much easier than trying to fiddle with custom URL monikers and such on desktop operating systems.

William Denniss – one of the authors of the above spec and the corresponding reference implementations – also created a couple of samples that show the usage of that technique for Windows desktop apps.

Inspired by that, I created a sample showing how to do OpenID Connect authentication from a console application using IdentityModel.OidcClient.

In a nutshell – it works like this:

Open a local listener

// create a redirect URI using an available port on the loopback address.
string redirectUri = string.Format("http://127.0.0.1:7890/");
Console.WriteLine("redirect URI: " + redirectUri);
 
// create an HttpListener to listen for requests on that redirect URI.
var http = new HttpListener();
http.Prefixes.Add(redirectUri);
Console.WriteLine("Listening..");
http.Start();

 

Construct the start URL, open the system browser and wait for a response

var options = new OidcClientOptions(
    "https://demo.identityserver.io",
    "native.code",
    "secret",
    "openid profile api",
    redirectUri);
options.Style = OidcClientOptions.AuthenticationStyle.AuthorizationCode;
 
var client = new OidcClient(options);
var state = await client.PrepareLoginAsync();
 
Console.WriteLine($"Start URL: {state.StartUrl}");
            
// open system browser to start authentication
Process.Start(state.StartUrl);
 
// wait for the authorization response.
var context = await http.GetContextAsync();

 

Process the response and access the claims and tokens

var result = await client.ValidateResponseAsync(context.Request.Url.AbsoluteUri, state);
 
if (result.Success)
{
    Console.WriteLine("\n\nClaims:");
    foreach (var claim in result.Claims)
    {
        Console.WriteLine("{0}: {1}", claim.Type, claim.Value);
    }
 
    Console.WriteLine();
    Console.WriteLine("Access token:\n{0}", result.AccessToken);
 
    if (!string.IsNullOrWhiteSpace(result.RefreshToken))
    {
        Console.WriteLine("Refresh token:\n{0}", result.RefreshToken);
    }
}
else
{
    Console.WriteLine("\n\nError:\n{0}", result.Error);
}
 
http.Stop();

 

Sample can be found here – have fun ;)

 

 


Filed under: IdentityModel, OAuth, OpenID Connect, WebAPI


Dominick Baier: Identity Videos, Podcasts and Slides from Conference Season 2016/1

My plan was to cut down on conferences and travelling in general – this didn’t work out ;) I did more conferences in the first 6 months of 2016 than I did in total last year. Weird.

Here are some of the digital artefacts:

NDC Oslo 2016: Authentication & secure API access for native & mobile Applications

DevSum 2016: What’s new in ASP.NET Core Security

DevSum 2016: Buzzfrog Podcast with Dag König

DevWeek 2016: Modern Applications need modern Identity

DevWeek 2016: Implementing OpenID Connect and OAuth 2.0 with IdentityServer

All my slides are on speakerdeck.


Filed under: .NET Security, ASP.NET, Conferences & Training, IdentityModel, IdentityServer, OAuth, OpenID Connect, Uncategorized, WebAPI


Filip Woj: Inheriting route attributes in ASP.NET Web API

I was recently working on a project, where I had a need to inherit routes from a generic base Web API controller. This is not supported by Web API out of the box, but can be enabled with a tiny configuration tweak. Let’s have a look.

The problem with inheriting attribute routes

If you look at the definition of the RouteAttribute in ASP.NET Web API, you will see that it’s marked as an “inheritable” attribute. As such, it’s reasonable to assume that if you use that attribute on a base controller, it will be respected in a child controller you create off the base one.

However, in reality, that is not the case, and that’s due to the internal logic in DefaultDirectRouteProvider – which is the default implementation of how Web API discovers attribute routes.

We discussed this class (and the entire extensibility point, as the direct route provider can be replaced) before – for example when implementing a centralized route prefix for Web API.

So if this is your generic Web API code, it will not work out of the box:

public abstract class GenericController<TEntity> : ApiController where TEntity : class, IMyEntityDefinition, new()
{
    private readonly IGenericRepository<TEntity> _repo;

    protected GenericController(IGenericRepository<TEntity> repo)
    {
        _repo = repo;
    }

    [Route("{id:int}")]
    public virtual async Task<IHttpActionResult> Get(int id)
    {
        var result = await _repo.FindAsync(id);
        if (result == null)
        {
            return NotFound();
        }

        return Ok(result);
    }
}

[RoutePrefix("api/items")]public class ItemController : GenericController<Item>
{
    public GenericController(IGenericRepository<Item> repo) : base(repo)
    {}
}

Ignoring the implementation details of the repository pattern here, and assuming all your dependency injection is configured already – with the above controller, trying to hit api/items/{id} is going to produce a 404.

The solution for inheriting attribute routes

One of the methods that this default direct route provider exposes as overridable is the one shown below. It is responsible for extracting route attributes from an action descriptor:

protected virtual IReadOnlyList<IDirectRouteFactory> GetActionRouteFactories(HttpActionDescriptor actionDescriptor)
{
    // Ignore the Route attributes from inherited actions.
    ReflectedHttpActionDescriptor reflectedActionDescriptor = actionDescriptor as ReflectedHttpActionDescriptor;
    if (reflectedActionDescriptor != null &&
        reflectedActionDescriptor.MethodInfo != null &&
        reflectedActionDescriptor.MethodInfo.DeclaringType != actionDescriptor.ControllerDescriptor.ControllerType)
    {
        return null;
    }

    Collection<IDirectRouteFactory> newFactories = actionDescriptor.GetCustomAttributes<IDirectRouteFactory>(inherit: false);

    Collection<IHttpRouteInfoProvider> oldProviders = actionDescriptor.GetCustomAttributes<IHttpRouteInfoProvider>(inherit: false);

    List<IDirectRouteFactory> combined = new List<IDirectRouteFactory>();
    combined.AddRange(newFactories);

    foreach (IHttpRouteInfoProvider oldProvider in oldProviders)
    {
        if (oldProvider is IDirectRouteFactory)
        {
            continue;
        }

        combined.Add(new RouteInfoDirectRouteFactory(oldProvider));
    }

    return combined;
}

Without going into too much detail about this code – it’s clearly visible that it specifically ignores inherited route attributes (route attributes implement the IDirectRouteFactory interface).

So in order to make our initial sample generic controller work, we need to override the above method and read all inherited routes. This is extremely simple and is shown below:

public class InheritanceDirectRouteProvider : DefaultDirectRouteProvider
{
    protected override IReadOnlyList<IDirectRouteFactory> GetActionRouteFactories(HttpActionDescriptor actionDescriptor)
    {
        return actionDescriptor.GetCustomAttributes<IDirectRouteFactory>(true);
    }
}

This can now be registered at the application startup against your HttpConfiguration – which is shown in the next snippet as an extension method + OWIN Startup class.

public static class HttpConfigurationExtensions
{
    public static void MapInheritedAttributeRoutes(this HttpConfiguration config)
    {
        config.MapHttpAttributeRoutes(new InheritanceDirectRouteProvider());
    }
}

public class Startup
{
    public void Configuration(IAppBuilder app)
    {
        var config = new HttpConfiguration();
        config.MapInheritedAttributeRoutes();
        app.UseWebApi(config);
    }
}

And that’s it!


Darrel Miller: Back to my core

I've spent a large part of the last two years playing the role of a technical marketeer.  Call it developer advocate, API Evangelist, or my favourite title, API Concierge, my role was to engage with developers and help them, in any way I could, to build better HTTP APIs.  I have really enjoyed the experience and had the opportunity to meet many great people.  However, the more you hear yourself talk about what people should do, the more you are reminded that you aren't actually doing the stuff you are talking about any more.  The time has come for me to stop just talking about building production systems and start doing it again.


Code is the answer

Starting this month, I am joining Microsoft as a full time software developer.  I am rejoining the Azure API Management team, this time to actually help build the product.  I am happy to be working on a product that is all about helping people build better HTTP based applications in a shorter amount of time.  I'm also really happy to be on a team that really cares about how HTTP should be used and is determined to make these capabilities available to the widest possible audience.

API Management is one of those dreadfully named product categories that actually save developers real time and money when building APIs.  Do you really want to implement rate limiting, API token issuing and geolocated HTTP caching yourself?

As a platform for middleware, API Management products can help you solve all kinds of challenges related to security, deployment, scaling and versioning of HTTP based systems.  It’s definitely my cup of tea.

I am hoping to still have the chance to do a few conferences a year and I definitely want to keep on blogging.  Perhaps you'll see some deeper technical content from me in the near future.  It's time to recharge those technical batteries and demonstrate that I can still walk the walk.

Interviews

Having just gone through the process of interviewing, I have some thoughts on the whole process.  I think it is fair to say that Microsoft have a fairly traditional interview process.  You spend a day talking to people from the hiring team and related teams.  You get the usual personal questions and questions about past experiences. When applying for a developer role you get a bunch of technical questions that usually require whiteboard coding on topics that are covered in college level courses.  I haven’t been in university for a very long time.  I can count the number of times I have had to reverse a linked list professionally on one hand.

These types of interview questions are typically met with scorn by experienced developers.  I have heard numerous people suggest alternative interview techniques that I believe would be more effective at determining if someone is a competent developer.

However, these are the hoops that candidates are asked to jump through.  It isn’t a surprise.  It is easy to find this out.  It is fairly easy to practice doing whiteboard coding and there are plenty of resources out there that demonstrate how to achieve many of these comp sci challenges.

I’ve heard developers say that if they were asked to perform such an irrelevant challenge in an interview they would walk out.  I don’t look at it that way.  I consider it an arbitrary challenge and if I can do the necessary prep work to pass, then it is a reflection on my ability to deal with other challenges I may face.  Maybe these interviews are an artificial test, but I would argue so was university. I certainly didn’t learn how to write code while doing an engineering degree.

Remote Work

I’m not going to be moving to Redmond.  I’m going to continue living in Montreal and working for a Redmond-based team.  We have one other developer who is remote, but is in the same time zone as the team.  It would be easier to do the job if I were in Redmond, but I can’t move for family reasons.  I’m actually glad that I can’t move, because I honestly think that remote work is the future for the tech industry.  Once a team gets used to working with remote team members, there really isn’t a downside and there are lots of upsides.

The tech industry has a major shortage of talent and a ridiculous tendency to congregate in certain geographic locations, which causes significant economic problems.  Tech people don’t have any need to be physically close to collaborate.  We should take advantage of that.

But Microsoft?

There is lots of doom and gloom commentary around Microsoft in the circles that I frequent.  Lots of it is related to the issues around ASP.NET Core and .NET Core.  If you look a little into Microsoft’s history you will see that whenever they attempt to make major changes that allow the next generation of products, they get beaten up for it.  Windows Vista is a classic example.  It was perceived as a huge failure, but it made the big changes that allowed Windows 7 to be successful.

The Core stuff is attempting to do a major reset on 15 years of history.  Grumpiness is guaranteed.  It doesn’t worry me particularly.  Could they have done stuff better?  Sure.  Did I ever think that a few teams in Microsoft could have instigated such a radical amount of change? Nope, never. But it is going to take time.  Way more time than those who like living on the bleeding edge are going to be happy about.

There is a whole lot of change happening at Microsoft.  The majority of what I see is really encouraging.  The employees I have met so far are consistently enthusiastic about the company and many of the employees who have left the company will describe their time there very favourably.

Historically, Microsoft was notorious for its hated stack ranking performance review system.  I had heard that the system had been abolished but I had no idea what the replacement system was until last week.  Only time will tell whether the new system will actually work, but my initial impression is that it is going to have an extremely positive impact on Microsoft culture.  The essence of the system is that you are measured on your contributions to your team, the impact you have had on helping other employees succeed and how you have built on the work of others.  The system, as I understand it, is designed to reward collaboration within the company.  If that doesn’t have an impact on the infamous Microsoft org chart comic, I don’t know what will.

Building stuff is fun

I got hooked on the creative endeavour of writing code 34 years ago and I hope to still be doing it for many more years to come.


Pedro Félix: Client-side development on OS X using Windows hosted HTTP Web APIs

In a recent post I described my Android development environment, based on an OS X host, the Genymotion Android emulator, and a Windows VM to run the back-end HTTP APIs.
In this post I’ll describe a similar environment but now for browser-side applications, once again using Windows hosted HTTP APIs.

Recently I had to do some prototyping involving browser-based applications, using ES6 and React, that interact with IdentityServer3 and a HTTP API.
Both the IdentityServer3 server and the ASP.NET HTTP APIs are running on a Windows VM; however, I prefer to use the host OS X environment for the client side development (node, npm, webpack, babel, …).
Another requirement is that the server side uses HTTPS and multiple host names (e.g. id.example.com, app1.example.com, app2.example.com), as described in this previous post.

The solution that I ended up using for this environment is the following:

  • On the Windows VM side I have Fiddler running on port 8888 with “Allow remote computer to connect” enabled. This means that Fiddler will act as a proxy even for requests originating from outside the Windows VM.
  • On the OS X host I launch Chrome with open -a "/Applications/Google Chrome.app" --args --proxy-server=10.211.55.3:8888 --proxy-bypass-list=localhost, where 10.211.55.3 is the Windows VM address. To automate this procedure I use the Automator tool to create a shell script based workflow.

The end result, depicted in the following diagram, is that all requests (except for localhost) will be forwarded to the Fiddler instance running on the Windows VM, which will use the Windows hosts file to direct the request to the multiple IIS sites.

[Diagram: requests from the OS X host routed through the Fiddler proxy on the Windows VM to the IIS sites]
As a bonus, I also have full visibility on the HTTP messages.

And that’s it. I hope it helps.



Pedro Félix: Using multiple IIS server certificates on Windows 7

Nowadays I do most of my Windows development on a Windows 7 VM running on macOS (Windows 8 and Windows Server 2012 left some scars, so I’m very reluctant to move to Windows 10). On this development environment I like to mimic some production environment characteristics, namely:

  • Using IIS based hosting
  • Having each site using different host names
  • Using HTTPS

For the site names I typically use example.com subdomains (e.g. id.example.com, app1.example.com, app2.example.com), which are reserved by IANA for documentation purposes (see RFC 6761). I associate these names with local addresses via the hosts file.

For generating the server certificates I use makecert and the scripts published in Appendix G of Designing Evolvable Web APIs with ASP.NET.

However, having multiple sites using distinct certificates hosted on the same IP address and port presents some challenges. This is because IIS/HTTP.SYS uses the Host header to demultiplex the incoming requests to the different sites bound to the same IP and port.
But when using TLS, the server certificate must be provided during the TLS handshake, well before the HTTP request and its Host header are received. Since at this time HTTP.SYS does not know the target site, it cannot select the appropriate certificate.

Server Name Indication (SNI) is a TLS extension (see RFC 3546) that addresses this issue, by letting the client send the host name in the TLS handshake, allowing the server to identity the target site and use the corresponding certificate.

Unfortunately, HTTP.SYS on Windows 7 does not support SNI (that’s what I get for using a 2009 operating system). To circumvent this I took advantage of the fact that there are loopback addresses other than 127.0.0.1. So, what I do is use a different loopback IP address for each site on my machine, as illustrated by the following excerpt from my hosts file:

127.0.0.2 app1.example.com
127.0.0.3 app2.example.com
127.0.0.4 id.example.com

When I configure the HTTPS IIS bindings I explicitly configure the listening IP addresses using these different values for each site, which allows me to use different certificates.

And that’s it. Hope it helps.



Pedro Félix: The OpenID Connect Cast of Characters

Introduction

The OpenID Connect protocol provides support for both delegated authorization and federated authentication, unifying features that traditionally were provided by distinct protocols. As a consequence, the OpenID Connect protocol parties play multiple roles at the same time, which can sometimes be hard to grasp. This post aims to clarify this, describing how the OpenID Connect parties relate to each other and to the equivalent parties in previous protocols, namely OAuth 2.0.

OAuth 2.0

The OAuth 2.0 authorization framework introduced a new set of characters into the distributed access control story.

[Diagram: the OAuth 2.0 parties]

  • The User (aka Resource Owner) is a human with the capability to authorize access to a set of protected resources (i.e. the user is the resource owner).
  • The Resource Server is the HTTP server exposing access to the protected resources via an HTTP API. This access is dependent on the presence and validation of access tokens in the HTTP request.
  • The Client Application is an HTTP client that accesses user resources on the Resource Server. To perform these accesses, the client application needs to obtain access tokens issued by the Authorization Server.
  • The Authorization Server is the party issuing the access tokens used by the Client Application on requests to the Resource Server.
  • Access Tokens are strings created by the Authorization Server and targeted to the Resource Server. They are opaque to the Client Application, which just obtains them from the Authorization Server and uses them on the Resource Server without any further processing.

To make things a little bit more concrete, let's look at an example:

  • The User is Alice and the protected resources are her repositories at GitHub.
  • The Resource Server is GitHub’s API.
  • The Client Application is a third-party application, such as Huboard or Travis CI, that needs to access Alice’s repositories.
  • The Authorization Server is also GitHub, providing the OAuth 2.0 protocol “endpoints” for the client application to obtain the access tokens.

OAuth 2.0 models the Resource Server and the Authorisation Server as two distinct parties, however they can be run by the same organization (GitHub, in the previous example).

[Diagram: Resource Server and Authorization Server run by the same organization]

An important characteristic to emphasise is that the access token does not directly provide any information about the User to the Client Application – it simply provides access to a set of protected resources. The fact that some of these protected resources may be used to provide information about the User’s identity is out of scope of OAuth 2.0.

Delegated Authentication and Identity Federation

However, delegated authentication and identity federation protocols, such as the SAML protocols or the WS-Federation protocol, use a different terminology.

[Diagram: the federation protocol parties]

  • The Relying Party (or Service Provider in the SAML protocol terminology) is typically a Web application that delegates user authentication into an external Identity Provider.
  • The Identity Provider is the entity authenticating the user and communicating her identity claims to the Relying Party.
  • The identity claims communication between these two parties is made via identity tokens, which are protected containers for identity claims
    • The Identity Provider creates the identity token.
    • The Relying Party consumes the identity token by validating it and using the contained identity claims.

Sometimes the same entity can play both roles; for instance, an Identity Provider can re-delegate the authentication process to another Identity Provider:

  • An Organisational Web application (e.g. order management) delegates the user authentication process to the Organisational Identity Provider.
  • However, this Organisational Identity Provider re-delegates user authentication into a Partner Identity Provider.
  • In this case, the Organisational Identity Provider is simultaneously
    • A Relying Party for the authentication made by the Partner Identity Provider.
    • An Identity Provider, providing identity claims to the Organisational Web Application.

[Diagram: an Identity Provider acting simultaneously as a Relying Party]

In these protocols, the main goal of the identity token is to provide identity information about the User to the Relying Party. Notably, the identity token is not intended to provide access to a set of protected resources. This characteristic sharply contrasts with OAuth 2.0 access tokens.

OpenID Connect

The OpenID Connect protocol is “a simple identity layer on top of the OAuth 2.0 protocol”, providing delegated authorisation as well as authentication delegation and identity federation. It unifies in a single protocol the functionalities that previously were provided by distinct protocols. As a consequence, there are now multiple parties that play more than one role:

  • The OpenID Provider (new term introduced by the OpenID Connect specification) is an Identity Provider and an Authorization Server, simultaneously issuing identity tokens and access tokens.
  • The Relying Party is also a Client Application. It receives both identity tokens and access tokens from the OpenID Provider. However, there is a significant difference in how these tokens are used by this party:
    • The identity tokens are consumed by the Relying Party/Client Application to obtain the user’s identity.
    • The access tokens are not directly consumed by the Relying Party. Instead they are attached to requests made to the Resource Server, without ever being opened at the Relying Party, as the sketch after this list illustrates.
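
The second point is plain OAuth 2.0 behaviour. As a minimal sketch (the Resource Server URL is made up), the Relying Party/Client Application simply attaches the access token as a bearer credential and never looks inside it:

using System.Net.Http;
using System.Net.Http.Headers;
using System.Threading.Tasks;

public static class ResourceServerClient
{
    // accessToken was obtained from the OpenID Provider together with the identity token
    public static async Task<string> GetProtectedResourceAsync(string accessToken)
    {
        using (var client = new HttpClient())
        {
            // The token is forwarded opaquely - only the Resource Server validates and interprets it
            client.DefaultRequestHeaders.Authorization =
                new AuthenticationHeaderValue("Bearer", accessToken);

            // Hypothetical Resource Server endpoint
            var response = await client.GetAsync("https://api.example.com/resource");
            response.EnsureSuccessStatusCode();
            return await response.Content.ReadAsStringAsync();
        }
    }
}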

[Diagram: the OpenID Connect parties]

I hope this post shed some light on the dual nature of the parties in the OpenID Connect protocol.

Please, feel free to use the comments section to place any question.



Ben Foster: How to configure Kestrel URLs in ASP.NET Core RC2

Prior to the release of ASP.NET Core RC2 Kestrel would be configured as part of the command bindings in project.json:

"commands": {
  "web": "Microsoft.AspNet.Server.Kestrel --server.urls=http://localhost:60000;http://localhost:60001;"
},

If no URLs were specified, a default binding of http://localhost:5000 would be used.

As of RC2 we have a new unified toolchain (the .NET Core CLI) and ASP.NET Core applications are effectively just .NET Core Console Applications. They have a single entry point where we programmatically configure and run the web host:

public static void Main(string[] args)
{
    var host = new WebHostBuilder()
        .UseKestrel()
        .UseContentRoot(Directory.GetCurrentDirectory())
        .UseIISIntegration()
        .UseStartup<Startup>()
        .Build();

    host.Run();
}

Here we're adding support for both Kestrel and IIS hosts via the appropriate extension methods.

When we upgraded SaasKit to RC2 we used the UseUrls extension to configure the URLs Kestrel would bind to:

var host = new WebHostBuilder()
    .UseKestrel()
    .UseContentRoot(Directory.GetCurrentDirectory())
    .UseUrls("http://localhost:60000", "http://localhost:60001")
    .UseIISIntegration()
    .UseStartup<Startup>()
    .Build();

I didn't really like this approach as we're hard-coding URLs. Fortunately it's still possible to load the Kestrel configuration from an external file.

First create a hosting.json file in the root of your application with your required bindings. Separate multiple URLs with a semi-colon:

{
  "server.urls": "http://localhost:60000;http://localhost:60001"
}

Next update Program.cs to load your hosting configuration, then use the UseConfiguration extension to pass the configuration to the WebHostBuilder:

public static void Main(string[] args)
{
    var config = new ConfigurationBuilder()
        .SetBasePath(Directory.GetCurrentDirectory())
        .AddJsonFile("hosting.json", optional: true)
        .Build();

    var host = new WebHostBuilder()
        .UseKestrel()
        .UseConfiguration(config)
        .UseContentRoot(Directory.GetCurrentDirectory())
        .UseIISIntegration()
        .UseStartup<Startup>()
        .Build();

    host.Run();
}

If you're launching Kestrel with Visual Studio you may also need to update launchSettings.json with the correct launchUrl:

"RC2HostingDemo": {
  "commandName": "Project",
  "launchBrowser": true,
  "launchUrl": "http://localhost:60000/api/values",
  "environmentVariables": {
    "ASPNETCORE_ENVIRONMENT": "Development"
  }
}

Now the web application will listen on the URLs configured in hosting.json:

Hosting environment: Development
Content root path: C:\Users\ben\Source\RC2HostingDemo\src\RC2HostingDemo
Now listening on: http://localhost:60000
Now listening on: http://localhost:60001
Application started. Press Ctrl+C to shut down.
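
If you prefer not to edit hosting.json for one-off runs, one option (a sketch, assuming the Microsoft.Extensions.Configuration.CommandLine package is referenced) is to layer command-line arguments over the file, so that for example dotnet run -- --server.urls=http://localhost:5002 takes precedence:

var config = new ConfigurationBuilder()
    .SetBasePath(Directory.GetCurrentDirectory())
    .AddJsonFile("hosting.json", optional: true)
    .AddCommandLine(args) // command-line values override hosting.json
    .Build();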

